9695 Slow crash dumps, significantly slower than live core
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3 3 * (i.e. Thunderbolt and Invader) controllers
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
[ 8 lines elided ]
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18 /*
19 - * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
19 + * Copyright 2018 Nexenta Systems, Inc.
20 20 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
21 21 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
22 22 */
23 23
24 24
25 25 #include <sys/types.h>
26 26 #include <sys/file.h>
27 27 #include <sys/atomic.h>
28 28 #include <sys/scsi/scsi.h>
29 29 #include <sys/byteorder.h>
30 30 #include <sys/sdt.h>
31 31 #include "ld_pd_map.h"
32 32 #include "mr_sas.h"
33 33 #include "fusion.h"
34 34
35 35 /*
36 36 * FMA header files
37 37 */
38 38 #include <sys/ddifm.h>
39 39 #include <sys/fm/protocol.h>
40 40 #include <sys/fm/util.h>
41 41 #include <sys/fm/io/ddi.h>
42 42
43 43
44 44 /* Pre-TB command size and TB command size. */
[ 15 lines elided ]
45 45 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
46 46 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
47 47 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
48 48 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
49 49 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
50 50 extern ddi_dma_attr_t mrsas_generic_dma_attr;
51 51 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
52 52 extern struct ddi_device_acc_attr endian_attr;
53 53 extern int debug_level_g;
54 54 extern unsigned int enable_fp;
55 -volatile int dump_io_wait_time = 90;
55 +volatile int dump_io_wait_time = 900;
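/*
 * dump_io_wait_time bounds the polling loop in
 * wait_for_outstanding_poll_io(): one drv_usecwait(MILLISEC) (1 ms) per
 * iteration, so the total wait budget stays roughly 900 ms. The old
 * values (90 iterations of 10 ms each) had the same budget but reaped
 * completed polled I/Os only every 10 ms, which is what made crash
 * dumps slow.
 */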
56 56 extern volatile int debug_timeout_g;
57 57 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
58 58 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
59 59 extern void push_pending_mfi_pkt(struct mrsas_instance *,
60 60 struct mrsas_cmd *);
61 61 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
62 62 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
63 63
64 64 /* Local static prototypes. */
65 65 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
66 66 struct scsi_address *, struct scsi_pkt *, uchar_t *);
67 67 static void mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32);
68 68 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
69 69 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
70 70 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
71 71 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
72 72 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
73 73 struct mrsas_tbolt_pd_info *, int);
74 74
75 75 static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;
76 76
77 77 /*
78 78 * destroy_mfi_mpi_frame_pool
79 79 */
80 80 void
81 81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
82 82 {
83 83 int i;
84 84
85 85 struct mrsas_cmd *cmd;
86 86
87 87 /* return all mfi frames to pool */
88 88 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
89 89 cmd = instance->cmd_list[i];
90 90 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
91 91 (void) mrsas_free_dma_obj(instance,
92 92 cmd->frame_dma_obj);
93 93 }
94 94 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
95 95 }
96 96 }
97 97
98 98 /*
99 99 * destroy_mpi2_frame_pool
100 100 */
101 101 void
102 102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
103 103 {
104 104
105 105 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
106 106 (void) mrsas_free_dma_obj(instance,
107 107 instance->mpi2_frame_pool_dma_obj);
108 108 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
109 109 }
110 110 }
111 111
112 112
113 113 /*
114 114 * mrsas_tbolt_free_additional_dma_buffer
115 115 */
116 116 void
117 117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
118 118 {
119 119 int i;
120 120
121 121 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
122 122 (void) mrsas_free_dma_obj(instance,
123 123 instance->mfi_internal_dma_obj);
124 124 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
125 125 }
126 126 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
127 127 (void) mrsas_free_dma_obj(instance,
128 128 instance->mfi_evt_detail_obj);
129 129 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
130 130 }
131 131
132 132 for (i = 0; i < 2; i++) {
133 133 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
134 134 (void) mrsas_free_dma_obj(instance,
135 135 instance->ld_map_obj[i]);
136 136 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
137 137 }
138 138 }
139 139 }
140 140
141 141
142 142 /*
143 143 * free_req_desc_pool
144 144 */
145 145 void
146 146 free_req_rep_desc_pool(struct mrsas_instance *instance)
147 147 {
148 148 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
149 149 (void) mrsas_free_dma_obj(instance,
150 150 instance->request_desc_dma_obj);
151 151 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
152 152 }
153 153
154 154 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
155 155 (void) mrsas_free_dma_obj(instance,
156 156 instance->reply_desc_dma_obj);
157 157 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
158 158 }
159 159
160 160
161 161 }
162 162
163 163
164 164 /*
165 165 * ThunderBolt(TB) Request Message Frame Pool
166 166 */
167 167 int
168 168 create_mpi2_frame_pool(struct mrsas_instance *instance)
169 169 {
170 170 int i = 0;
171 171 uint16_t max_cmd;
172 172 uint32_t sgl_sz;
173 173 uint32_t raid_msg_size;
174 174 uint32_t total_size;
175 175 uint32_t offset;
176 176 uint32_t io_req_base_phys;
177 177 uint8_t *io_req_base;
178 178 struct mrsas_cmd *cmd;
179 179
180 180 max_cmd = instance->max_fw_cmds;
181 181
182 182 sgl_sz = 1024;
183 183 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
184 184
185 185 /* Allocating additional 256 bytes to accommodate SMID 0. */
186 186 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
187 187 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
188 188
189 189 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
190 190 "max_cmd %x", max_cmd));
191 191
192 192 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
193 193 "request message frame pool size %x", total_size));
194 194
195 195 /*
196 196 * ThunderBolt(TB): we need to create a single chunk of DMA'able
197 197 * memory and then split that memory among the commands. Each command
198 198 * should be able to contain a RAID MESSAGE FRAME which will embed an
199 199 * MFI_FRAME within it. See also the "alloc_req_rep_desc" function,
200 200 * where the request/reply descriptor queues are allocated.
201 201 */
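	/*
	 * Resulting pool layout, in the order carved out below (sizes per
	 * the values set above, e.g. sgl_sz = 1024):
	 *
	 *   [1       x MRSAS_THUNDERBOLT_MSG_SIZE]  frame for SMID 0 (IOC INIT)
	 *   [max_cmd x raid_msg_size]               SCSI IO request frames
	 *   [max_cmd x sgl_sz]                      chained SGL buffers
	 *   [max_cmd x SENSE_LENGTH]                sense buffers
	 */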
202 202
203 203 instance->mpi2_frame_pool_dma_obj.size = total_size;
204 204 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
205 205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
206 206 0xFFFFFFFFU;
207 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
208 208 0xFFFFFFFFU;
209 209 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
210 210 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
211 211
212 212 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
213 213 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
214 214 dev_err(instance->dip, CE_WARN,
215 215 "could not alloc mpi2 frame pool");
216 216 return (DDI_FAILURE);
217 217 }
218 218
219 219 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
220 220 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
221 221
222 222 instance->io_request_frames =
223 223 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
224 224 instance->io_request_frames_phy =
225 225 (uint32_t)
226 226 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
227 227
228 228 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
229 229 (void *)instance->io_request_frames));
230 230
231 231 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
232 232 instance->io_request_frames_phy));
233 233
234 234 io_req_base = (uint8_t *)instance->io_request_frames +
235 235 MRSAS_THUNDERBOLT_MSG_SIZE;
236 236 io_req_base_phys = instance->io_request_frames_phy +
237 237 MRSAS_THUNDERBOLT_MSG_SIZE;
238 238
239 239 con_log(CL_DLEVEL3, (CE_NOTE,
240 240 "io req_base_phys 0x%x", io_req_base_phys));
241 241
242 242 for (i = 0; i < max_cmd; i++) {
243 243 cmd = instance->cmd_list[i];
244 244
245 245 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
246 246
247 247 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
248 248 ((uint8_t *)io_req_base + offset);
249 249 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
250 250
251 251 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
252 252 (max_cmd * raid_msg_size) + i * sgl_sz);
253 253
254 254 cmd->sgl_phys_addr = (io_req_base_phys +
255 255 (max_cmd * raid_msg_size) + i * sgl_sz);
256 256
257 257 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
258 258 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
259 259 (i * SENSE_LENGTH));
260 260
261 261 cmd->sense_phys_addr1 = (io_req_base_phys +
262 262 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
263 263 (i * SENSE_LENGTH));
264 264
265 265
266 266 cmd->SMID = i + 1;
267 267
268 268 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
269 269 cmd->index, (void *)cmd->scsi_io_request));
270 270
271 271 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
272 272 cmd->index, cmd->scsi_io_request_phys_addr));
273 273
274 274 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
275 275 cmd->index, (void *)cmd->sense1));
276 276
277 277 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
278 278 cmd->index, cmd->sense_phys_addr1));
279 279
280 280 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
281 281 cmd->index, (void *)cmd->sgl));
282 282
283 283 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
284 284 cmd->index, cmd->sgl_phys_addr));
285 285 }
286 286
287 287 return (DDI_SUCCESS);
288 288
289 289 }
290 290
291 291
292 292 /*
293 293 * alloc_additional_dma_buffer for AEN
294 294 */
295 295 int
296 296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
297 297 {
298 298 uint32_t internal_buf_size = PAGESIZE*2;
299 299 int i;
300 300
301 301 /* Initialize buffer status as free */
302 302 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
303 303 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
304 304 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
305 305 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
306 306
307 307
308 308 instance->mfi_internal_dma_obj.size = internal_buf_size;
309 309 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
310 310 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
311 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
312 312 0xFFFFFFFFU;
313 313 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
314 314
315 315 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
316 316 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
317 317 dev_err(instance->dip, CE_WARN,
318 318 "could not alloc reply queue");
319 319 return (DDI_FAILURE);
320 320 }
321 321
322 322 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
323 323
324 324 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
325 325 instance->internal_buf =
326 326 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
327 327 instance->internal_buf_dmac_add =
328 328 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
329 329 instance->internal_buf_size = internal_buf_size;
330 330
331 331 /* allocate evt_detail */
332 332 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
333 333 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
334 334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
335 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
336 336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
337 337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
338 338
339 339 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
340 340 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
341 341 dev_err(instance->dip, CE_WARN,
342 342 "mrsas_tbolt_alloc_additional_dma_buffer: "
343 343 "could not allocate data transfer buffer.");
344 344 goto fail_tbolt_additional_buff;
345 345 }
346 346
347 347 bzero(instance->mfi_evt_detail_obj.buffer,
348 348 sizeof (struct mrsas_evt_detail));
349 349
350 350 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
351 351
352 352 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
353 353 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
354 354
355 355 for (i = 0; i < 2; i++) {
356 356 /* allocate the data transfer buffer */
357 357 instance->ld_map_obj[i].size = instance->size_map_info;
358 358 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
359 359 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
360 360 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
361 361 0xFFFFFFFFU;
362 362 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
363 363 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
364 364
365 365 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
366 366 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
367 367 dev_err(instance->dip, CE_WARN,
368 368 "could not allocate data transfer buffer.");
369 369 goto fail_tbolt_additional_buff;
370 370 }
371 371
372 372 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
373 373
374 374 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
375 375
376 376 instance->ld_map[i] =
377 377 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
378 378 instance->ld_map_phy[i] = (uint32_t)instance->
379 379 ld_map_obj[i].dma_cookie[0].dmac_address;
380 380
381 381 con_log(CL_DLEVEL3, (CE_NOTE,
382 382 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
383 383
384 384 con_log(CL_DLEVEL3, (CE_NOTE,
385 385 "size_map_info 0x%x", instance->size_map_info));
386 386 }
387 387
388 388 return (DDI_SUCCESS);
389 389
390 390 fail_tbolt_additional_buff:
391 391 mrsas_tbolt_free_additional_dma_buffer(instance);
392 392
393 393 return (DDI_FAILURE);
394 394 }
395 395
396 396 MRSAS_REQUEST_DESCRIPTOR_UNION *
397 397 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
398 398 {
399 399 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
400 400
401 401 if (index > instance->max_fw_cmds) {
402 402 con_log(CL_ANN1, (CE_NOTE,
403 403 "Invalid SMID 0x%x request for descriptor", index));
404 404 con_log(CL_ANN1, (CE_NOTE,
405 405 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
406 406 return (NULL);
407 407 }
408 408
409 409 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
410 410 ((char *)instance->request_message_pool +
411 411 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
412 412
413 413 con_log(CL_ANN1, (CE_NOTE,
414 414 "request descriptor : 0x%08lx", (unsigned long)req_desc));
415 415
416 416 con_log(CL_ANN1, (CE_NOTE,
417 417 "request descriptor base phy : 0x%08lx",
418 418 (unsigned long)instance->request_message_pool_phy));
419 419
420 420 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
421 421 }
422 422
423 423
424 424 /*
425 425 * Allocate Request and Reply Queue Descriptors.
426 426 */
427 427 int
428 428 alloc_req_rep_desc(struct mrsas_instance *instance)
429 429 {
430 430 uint32_t request_q_sz, reply_q_sz;
431 431 int i, max_reply_q_sz;
432 432 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
433 433
434 434 /*
435 435 * ThunderBolt(TB): there is no longer a producer-consumer mechanism.
436 436 * Once we get an interrupt we scan through the list of reply
437 437 * descriptors and process them accordingly, so we need to allocate
438 438 * memory for the reply descriptors (up to 1024).
439 439 */
440 440
441 441 /* Allocate Reply Descriptors */
442 442 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
443 443 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
444 444
445 445 /* reply queue size should be a multiple of 16 */
446 446 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
447 447
448 448 reply_q_sz = 8 * max_reply_q_sz;
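	/*
	 * Example: each reply descriptor is 8 bytes, so with
	 * max_fw_cmds = 1007 (see mrsas_init_adapter_tbolt()) this is
	 * (1007 + 1 + 15) / 16 * 16 = 1008 entries and
	 * reply_q_sz = 8 * 1008 = 8064 bytes.
	 */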
449 449
450 450
451 451 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
452 452 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
453 453
454 454 instance->reply_desc_dma_obj.size = reply_q_sz;
455 455 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
456 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
457 457 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
458 458 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
459 459 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
460 460
461 461 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
462 462 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
463 463 dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
464 464 return (DDI_FAILURE);
465 465 }
466 466
467 467 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
468 468 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
469 469
470 470 /* virtual address of reply queue */
471 471 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
472 472 instance->reply_desc_dma_obj.buffer);
473 473
474 474 instance->reply_q_depth = max_reply_q_sz;
475 475
476 476 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
477 477 instance->reply_q_depth));
478 478
479 479 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
480 480 (void *)instance->reply_frame_pool));
481 481
482 482 /* initializing reply address to 0xFFFFFFFF */
483 483 reply_desc = instance->reply_frame_pool;
484 484
485 485 for (i = 0; i < instance->reply_q_depth; i++) {
486 486 reply_desc->Words = (uint64_t)~0;
487 487 reply_desc++;
488 488 }
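	/*
	 * Slots keep Words == 0xFFFFFFFFFFFFFFFF until the firmware posts
	 * a completion there, so the reply handler can tell a fresh
	 * descriptor from an empty slot without a produced/consumed index.
	 */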
489 489
490 490
491 491 instance->reply_frame_pool_phy =
492 492 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
493 493
494 494 con_log(CL_ANN1, (CE_NOTE,
495 495 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
496 496
497 497
498 498 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
499 499 reply_q_sz);
500 500
501 501 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
502 502 instance->reply_pool_limit_phy));
503 503
504 504
505 505 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
506 506 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 507
508 508 /* Allocate Request Descriptors */
509 509 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
510 510 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
511 511
512 512 request_q_sz = 8 *
513 513 (instance->max_fw_cmds);
514 514
515 515 instance->request_desc_dma_obj.size = request_q_sz;
516 516 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
517 517 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
518 518 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
519 519 0xFFFFFFFFU;
520 520 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
521 521 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
522 522
523 523 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
524 524 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
525 525 dev_err(instance->dip, CE_WARN,
526 526 "could not alloc request queue desc");
527 527 goto fail_undo_reply_queue;
528 528 }
529 529
530 530 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
531 531 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
532 532
533 533 /* virtual address of request queue desc */
534 534 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
535 535 (instance->request_desc_dma_obj.buffer);
536 536
537 537 instance->request_message_pool_phy =
538 538 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
539 539
540 540 return (DDI_SUCCESS);
541 541
542 542 fail_undo_reply_queue:
543 543 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
544 544 (void) mrsas_free_dma_obj(instance,
545 545 instance->reply_desc_dma_obj);
546 546 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
547 547 }
548 548
549 549 return (DDI_FAILURE);
550 550 }
551 551
552 552 /*
553 553 * mrsas_alloc_cmd_pool_tbolt
554 554 *
555 555 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
556 556 * routine
557 557 */
558 558 int
559 559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
560 560 {
561 561 int i;
562 562 int count;
563 563 uint32_t max_cmd;
564 564 uint32_t reserve_cmd;
565 565 size_t sz;
566 566
567 567 struct mrsas_cmd *cmd;
568 568
569 569 max_cmd = instance->max_fw_cmds;
570 570 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
571 571 "max_cmd %x", max_cmd));
572 572
573 573
574 574 sz = sizeof (struct mrsas_cmd *) * max_cmd;
575 575
576 576 /*
577 577 * instance->cmd_list is an array of struct mrsas_cmd pointers.
578 578 * Allocate the dynamic array first and then allocate individual
579 579 * commands.
580 580 */
581 581 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
582 582
583 583 /* create a frame pool and assign one frame to each cmd */
584 584 for (count = 0; count < max_cmd; count++) {
585 585 instance->cmd_list[count] =
586 586 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
587 587 }
588 588
589 589 /* add all the commands to command pool */
590 590
591 591 INIT_LIST_HEAD(&instance->cmd_pool_list);
592 592 INIT_LIST_HEAD(&instance->cmd_pend_list);
593 593 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
594 594
595 595 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
596 596
597 597 /* cmd index 0 reserved for IOC INIT */
598 598 for (i = 1; i < reserve_cmd; i++) {
599 599 cmd = instance->cmd_list[i];
600 600 cmd->index = i;
601 601 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
602 602 }
603 603
604 604
605 605 for (i = reserve_cmd; i < max_cmd; i++) {
606 606 cmd = instance->cmd_list[i];
607 607 cmd->index = i;
608 608 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
609 609 }
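	/*
	 * Pool partitioning (sketch): cmd_list[0] is held back for IOC
	 * INIT, cmd_list[1 .. MRSAS_APP_RESERVED_CMDS - 1] feed the
	 * application (pass-through) pool, and the remainder feed the
	 * regular I/O pool.
	 */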
610 610
611 611 return (DDI_SUCCESS);
612 612
613 613 mrsas_undo_cmds:
614 614 if (count > 0) {
615 615 /* free each cmd */
616 616 for (i = 0; i < count; i++) {
617 617 if (instance->cmd_list[i] != NULL) {
618 618 kmem_free(instance->cmd_list[i],
619 619 sizeof (struct mrsas_cmd));
620 620 }
621 621 instance->cmd_list[i] = NULL;
622 622 }
623 623 }
624 624
625 625 mrsas_undo_cmd_list:
626 626 if (instance->cmd_list != NULL)
627 627 kmem_free(instance->cmd_list, sz);
628 628 instance->cmd_list = NULL;
629 629
630 630 return (DDI_FAILURE);
631 631 }
632 632
633 633
634 634 /*
635 635 * free_space_for_mpi2
636 636 */
637 637 void
638 638 free_space_for_mpi2(struct mrsas_instance *instance)
639 639 {
640 640 /* already freed */
641 641 if (instance->cmd_list == NULL) {
642 642 return;
643 643 }
644 644
645 645 /* First free the additional DMA buffer */
646 646 mrsas_tbolt_free_additional_dma_buffer(instance);
647 647
648 648 /* Free the request/reply descriptor pool */
649 649 free_req_rep_desc_pool(instance);
650 650
651 651 /* Free the MPI message pool */
652 652 destroy_mpi2_frame_pool(instance);
653 653
654 654 /* Free the MFI frame pool */
655 655 destroy_mfi_frame_pool(instance);
656 656
657 657 /* Free all the commands in the cmd_list */
658 658 /* Free the cmd_list buffer itself */
659 659 mrsas_free_cmd_pool(instance);
660 660 }
661 661
662 662
663 663 /*
664 664 * ThunderBolt(TB) memory allocations for commands/messages/frames.
665 665 */
666 666 int
667 667 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 668 {
669 669 /* Allocate command pool (memory for cmd_list & individual commands) */
670 670 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 671 dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
672 672 return (DDI_FAILURE);
673 673 }
674 674
675 675 /* Initialize single reply size and Message size */
676 676 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 677 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 678
679 679 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 680 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 681 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 682 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 683 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 684
685 685 /* Reduce SG count by 1 to take care of group cmds feature in FW */
686 686 instance->max_num_sge = (instance->max_sge_in_main_msg +
687 687 instance->max_sge_in_chain - 2);
688 688 instance->chain_offset_mpt_msg =
689 689 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 690 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 691 sizeof (MPI2_SGE_IO_UNION)) / 16;
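	/*
	 * Worked example, assuming MRSAS_THUNDERBOLT_MSG_SIZE is 256 and
	 * sizeof (MPI2_SGE_IO_UNION) is 16: max_sge_in_chain =
	 * (1280 - 256) / 16 = 64 and chain_offset_io_req =
	 * (256 - 16) / 16 = 15 (chain offsets are in 16-byte units).
	 */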
692 692 instance->reply_read_index = 0;
693 693
694 694
695 695 /* Allocate Request and Reply descriptors Array */
696 696 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
697 697 if (alloc_req_rep_desc(instance)) {
698 698 dev_err(instance->dip, CE_WARN,
699 699 "Error, allocating memory for descriptor-pool");
700 700 goto mpi2_undo_cmd_pool;
701 701 }
702 702 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 703 instance->request_message_pool_phy));
704 704
705 705
706 706 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 707 if (create_mfi_frame_pool(instance)) {
708 708 dev_err(instance->dip, CE_WARN,
709 709 "Error, allocating memory for MFI frame-pool");
710 710 goto mpi2_undo_descripter_pool;
711 711 }
712 712
713 713
714 714 /* Allocate MPI2 Message pool */
715 715 /*
716 716 * Make sure the buffer is aligned to 256 for the raid message packet;
717 717 * create an io request pool and assign one frame to each cmd.
718 718 */
719 719
720 720 if (create_mpi2_frame_pool(instance)) {
721 721 dev_err(instance->dip, CE_WARN,
722 722 "Error, allocating memory for MPI2 Message-pool");
723 723 goto mpi2_undo_mfi_frame_pool;
724 724 }
725 725
726 726 #ifdef DEBUG
727 727 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 728 instance->max_sge_in_main_msg));
729 729 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 730 instance->max_sge_in_chain));
731 731 con_log(CL_ANN1, (CE_CONT,
732 732 "[max_sge]0x%x", instance->max_num_sge));
733 733 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 734 instance->chain_offset_mpt_msg));
735 735 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 736 instance->chain_offset_io_req));
737 737 #endif
738 738
739 739
740 740 /* Allocate additional dma buffer */
741 741 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 742 dev_err(instance->dip, CE_WARN,
743 743 "Error, allocating tbolt additional DMA buffer");
744 744 goto mpi2_undo_message_pool;
745 745 }
746 746
747 747 return (DDI_SUCCESS);
748 748
749 749 mpi2_undo_message_pool:
750 750 destroy_mpi2_frame_pool(instance);
751 751
752 752 mpi2_undo_mfi_frame_pool:
753 753 destroy_mfi_frame_pool(instance);
754 754
755 755 mpi2_undo_descripter_pool:
756 756 free_req_rep_desc_pool(instance);
757 757
758 758 mpi2_undo_cmd_pool:
759 759 mrsas_free_cmd_pool(instance);
760 760
761 761 return (DDI_FAILURE);
762 762 }
763 763
764 764
765 765 /*
766 766 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767 767 */
768 768 int
769 769 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
770 770 {
771 771
772 772 /*
773 773 * Reduce the max supported cmds by 1. This is to ensure that the
774 774 * reply_q_sz (1 more than the max cmd that driver may send)
775 775 * does not exceed max cmds that the FW can support
776 776 */
777 777
778 778 if (instance->max_fw_cmds > 1008) {
779 779 instance->max_fw_cmds = 1008;
780 780 instance->max_fw_cmds = instance->max_fw_cmds-1;
781 781 }
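	/*
	 * Net effect: max_fw_cmds is capped at 1008 - 1 = 1007, so
	 * reply_q_sz (one more than the most commands the driver will
	 * ever have outstanding) stays within the firmware's limit.
	 */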
782 782
783 783 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 784 "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 785
786 786
787 787 /* create a pool of commands */
788 788 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 789 dev_err(instance->dip, CE_WARN,
790 790 "alloc_space_for_mpi2() failed.");
791 791
792 792 return (DDI_FAILURE);
793 793 }
794 794
795 795 /* Send ioc init message */
796 796 /* NOTE: the issue_init call does FMA checking already. */
797 797 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
798 798 dev_err(instance->dip, CE_WARN,
799 799 "mrsas_issue_init_mpi2() failed.");
800 800
801 801 goto fail_init_fusion;
802 802 }
803 803
804 804 instance->unroll.alloc_space_mpi2 = 1;
805 805
806 806 con_log(CL_ANN, (CE_NOTE,
807 807 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
808 808
809 809 return (DDI_SUCCESS);
810 810
811 811 fail_init_fusion:
812 812 free_space_for_mpi2(instance);
813 813
814 814 return (DDI_FAILURE);
815 815 }
816 816
817 817
818 818
819 819 /*
820 820 * init_mpi2
821 821 */
822 822 int
823 823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
824 824 {
825 825 dma_obj_t init2_dma_obj;
826 826 int ret_val = DDI_SUCCESS;
827 827
828 828 /* allocate DMA buffer for IOC INIT message */
829 829 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
830 830 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
831 831 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
832 832 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
833 833 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
834 834 init2_dma_obj.dma_attr.dma_attr_align = 256;
835 835
836 836 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
837 837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
838 838 dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
839 839 "could not allocate data transfer buffer.");
840 840 return (DDI_FAILURE);
841 841 }
842 842 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
843 843
844 844 con_log(CL_ANN1, (CE_NOTE,
845 845 "mrsas_issue_init_mpi2 _phys adr: %x",
846 846 init2_dma_obj.dma_cookie[0].dmac_address));
847 847
848 848
849 849 /* Initialize and send ioc init message */
850 850 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
851 851 if (ret_val == DDI_FAILURE) {
852 852 con_log(CL_ANN1, (CE_WARN,
853 853 "mrsas_issue_init_mpi2: Failed"));
854 854 goto fail_init_mpi2;
855 855 }
856 856
857 857 /* free IOC init DMA buffer */
858 858 if (mrsas_free_dma_obj(instance, init2_dma_obj)
859 859 != DDI_SUCCESS) {
860 860 con_log(CL_ANN1, (CE_WARN,
861 861 "mrsas_issue_init_mpi2: Free Failed"));
862 862 return (DDI_FAILURE);
863 863 }
864 864
865 865 /* Get/Check and sync ld_map info */
866 866 instance->map_id = 0;
867 867 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
868 868 (void) mrsas_tbolt_sync_map_info(instance);
869 869
870 870
871 871 /* No mrsas_cmd to send, so send NULL. */
872 872 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
873 873 goto fail_init_mpi2;
874 874
875 875 con_log(CL_ANN, (CE_NOTE,
876 876 "mrsas_issue_init_mpi2: SUCCESSFUL"));
877 877
878 878 return (DDI_SUCCESS);
879 879
880 880 fail_init_mpi2:
881 881 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
882 882
883 883 return (DDI_FAILURE);
884 884 }
885 885
886 886 static int
887 887 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
888 888 {
889 889 int numbytes;
890 890 uint16_t flags;
891 891 struct mrsas_init_frame2 *mfiFrameInit2;
892 892 struct mrsas_header *frame_hdr;
893 893 Mpi2IOCInitRequest_t *init;
894 894 struct mrsas_cmd *cmd = NULL;
895 895 struct mrsas_drv_ver drv_ver_info;
896 896 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
897 897 uint32_t timeout;
898 898
899 899 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
900 900
901 901
902 902 #ifdef DEBUG
903 903 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
904 904 (int)sizeof (*mfiFrameInit2)));
905 905 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
906 906 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
907 907 (int)sizeof (struct mrsas_init_frame2)));
908 908 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
909 909 (int)sizeof (Mpi2IOCInitRequest_t)));
910 910 #endif
911 911
912 912 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
913 913 numbytes = sizeof (*init);
914 914 bzero(init, numbytes);
915 915
916 916 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
917 917 MPI2_FUNCTION_IOC_INIT);
918 918
919 919 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
920 920 MPI2_WHOINIT_HOST_DRIVER);
921 921
922 922 /* set MsgVersion and HeaderVersion host driver was built with */
923 923 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
924 924 MPI2_VERSION);
925 925
926 926 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
927 927 MPI2_HEADER_VERSION);
928 928
929 929 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
930 930 instance->raid_io_msg_size / 4);
931 931
932 932 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
933 933 0);
934 934
935 935 ddi_put16(mpi2_dma_obj->acc_handle,
936 936 &init->ReplyDescriptorPostQueueDepth,
937 937 instance->reply_q_depth);
938 938 /*
939 939 * These addresses are set using the DMA cookie addresses from when the
940 940 * memory was allocated. Sense buffer hi address should be 0.
941 941 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
942 942 */
943 943
944 944 ddi_put32(mpi2_dma_obj->acc_handle,
945 945 &init->SenseBufferAddressHigh, 0);
946 946
947 947 ddi_put64(mpi2_dma_obj->acc_handle,
948 948 (uint64_t *)&init->SystemRequestFrameBaseAddress,
949 949 instance->io_request_frames_phy);
950 950
951 951 ddi_put64(mpi2_dma_obj->acc_handle,
952 952 &init->ReplyDescriptorPostQueueAddress,
953 953 instance->reply_frame_pool_phy);
954 954
955 955 ddi_put64(mpi2_dma_obj->acc_handle,
956 956 &init->ReplyFreeQueueAddress, 0);
957 957
958 958 cmd = instance->cmd_list[0];
959 959 if (cmd == NULL) {
960 960 return (DDI_FAILURE);
961 961 }
962 962 cmd->retry_count_for_ocr = 0;
963 963 cmd->pkt = NULL;
964 964 cmd->drv_pkt_time = 0;
965 965
966 966 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
967 967 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
968 968
969 969 frame_hdr = &cmd->frame->hdr;
970 970
971 971 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
972 972 MFI_CMD_STATUS_POLL_MODE);
973 973
974 974 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
975 975
976 976 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
977 977
978 978 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
979 979
980 980 con_log(CL_ANN, (CE_CONT,
981 981 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
982 982
983 983 /* Init the MFI Header */
984 984 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
985 985 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
986 986
987 987 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
988 988
989 989 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
990 990 &mfiFrameInit2->cmd_status,
991 991 MFI_STAT_INVALID_STATUS);
992 992
993 993 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
994 994
995 995 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
996 996 &mfiFrameInit2->queue_info_new_phys_addr_lo,
997 997 mpi2_dma_obj->dma_cookie[0].dmac_address);
998 998
999 999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1000 1000 &mfiFrameInit2->data_xfer_len,
1001 1001 sizeof (Mpi2IOCInitRequest_t));
1002 1002
1003 1003 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1004 1004 (int)init->ReplyDescriptorPostQueueAddress));
1005 1005
1006 1006 /* fill driver version information */
1007 1007 fill_up_drv_ver(&drv_ver_info);
1008 1008
1009 1009 /* allocate the driver version data transfer buffer */
1010 1010 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1011 1011 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1012 1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1013 1013 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1014 1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1015 1015 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1016 1016
1017 1017 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1018 1018 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1019 1019 dev_err(instance->dip, CE_WARN,
1020 1020 "fusion init: Could not allocate driver version buffer.");
1021 1021 return (DDI_FAILURE);
1022 1022 }
1023 1023 /* copy driver version to dma buffer */
1024 1024 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1025 1025 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1026 1026 (uint8_t *)drv_ver_info.drv_ver,
1027 1027 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1028 1028 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1029 1029
1030 1030 /* send driver version physical address to firmware */
1031 1031 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1032 1032 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1033 1033
1034 1034 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1035 1035 mfiFrameInit2->queue_info_new_phys_addr_lo,
1036 1036 (int)sizeof (Mpi2IOCInitRequest_t)));
1037 1037
1038 1038 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1039 1039
1040 1040 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1041 1041 cmd->scsi_io_request_phys_addr,
1042 1042 (int)sizeof (struct mrsas_init_frame2)));
1043 1043
1044 1044 /* disable interrupts before sending INIT2 frame */
1045 1045 instance->func_ptr->disable_intr(instance);
1046 1046
1047 1047 req_desc.Words = cmd->scsi_io_request_phys_addr;
1048 1048 req_desc.MFAIo.RequestFlags =
1049 1049 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1050 1050
1051 1051 cmd->request_desc = &req_desc;
1052 1052
1053 1053 /* issue the init frame */
1054 1054
1055 1055 mutex_enter(&instance->reg_write_mtx);
1056 1056 WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
1057 1057 WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
1058 1058 mutex_exit(&instance->reg_write_mtx);
1059 1059
1060 1060 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1061 1061 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1062 1062 frame_hdr->cmd_status));
1063 1063
1064 1064 timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
1065 1065 do {
1066 1066 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
1067 1067 &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
1068 1068 break;
1069 1069 delay(1);
1070 1070 timeout--;
1071 1071 } while (timeout > 0);
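	/*
	 * The budget is MFI_POLL_TIMEOUT_SECS converted to clock ticks by
	 * drv_usectohz(); each delay(1) sleeps one tick, so the loop polls
	 * the status byte roughly once per tick until it leaves
	 * MFI_CMD_STATUS_POLL_MODE or the budget runs out.
	 */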
1072 1072
1073 1073 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1074 1074 &mfiFrameInit2->cmd_status) == 0) {
1075 1075 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1076 1076 } else {
1077 1077 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1078 1078 mrsas_dump_reply_desc(instance);
1079 1079 goto fail_ioc_init;
1080 1080 }
1081 1081
1082 1082 mrsas_dump_reply_desc(instance);
1083 1083
1084 1084 instance->unroll.verBuff = 1;
1085 1085
1086 1086 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1087 1087
1088 1088 return (DDI_SUCCESS);
1089 1089
1090 1090
1091 1091 fail_ioc_init:
1092 1092
1093 1093 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1094 1094
1095 1095 return (DDI_FAILURE);
1096 1096 }
1097 1097
1098 1098 int
1099 1099 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1100 1100 {
[ 1035 lines elided ]
1101 1101 int i;
1102 1102 uint32_t wait_time = dump_io_wait_time;
1103 1103 for (i = 0; i < wait_time; i++) {
1104 1104 /*
1105 1105 * Check For Outstanding poll Commands
1106 1106 * except ldsync command and aen command
1107 1107 */
1108 1108 if (instance->fw_outstanding <= 2) {
1109 1109 break;
1110 1110 }
1111 - drv_usecwait(10*MILLISEC);
1111 + drv_usecwait(MILLISEC);
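		/*
		 * Polling every 1 ms instead of every 10 ms means a
		 * completed polled command (crash dump I/O is issued one
		 * command at a time with FLAG_NOINTR) is reaped almost
		 * immediately; with the larger dump_io_wait_time above,
		 * the overall timeout is unchanged.
		 */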
1112 1112 /* complete commands from reply queue */
1113 1113 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1114 1114 }
1115 1115 if (instance->fw_outstanding > 2) {
1116 1116 return (1);
1117 1117 }
1118 1118 return (0);
1119 1119 }
1120 1120 /*
1121 1121 * scsi_pkt handling
1122 1122 *
1123 1123 * Visible to the external world via the transport structure.
1124 1124 */
1125 1125
1126 1126 int
1127 1127 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1128 1128 {
1129 1129 struct mrsas_instance *instance = ADDR2MR(ap);
1130 1130 struct scsa_cmd *acmd = PKT2CMD(pkt);
1131 1131 struct mrsas_cmd *cmd = NULL;
1132 1132 uchar_t cmd_done = 0;
1133 1133
1134 1134 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1135 1135 if (instance->deadadapter == 1) {
1136 1136 dev_err(instance->dip, CE_WARN,
1137 1137 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1138 1138 "for IO, as the HBA doesn't take any more IOs");
1139 1139 if (pkt) {
1140 1140 pkt->pkt_reason = CMD_DEV_GONE;
1141 1141 pkt->pkt_statistics = STAT_DISCON;
1142 1142 }
1143 1143 return (TRAN_FATAL_ERROR);
1144 1144 }
1145 1145 if (instance->adapterresetinprogress) {
1146 1146 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1147 1147 "returning mfi_pkt and setting TRAN_BUSY\n"));
1148 1148 return (TRAN_BUSY);
1149 1149 }
1150 1150 (void) mrsas_tbolt_prepare_pkt(acmd);
1151 1151
1152 1152 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1153 1153
1154 1154 /*
1155 1155 * Check if the command was already completed by the mrsas_build_cmd()
1156 1156 * routine, in which case the busy_flag is clear, scb is NULL, and an
1157 1157 * appropriate reason is provided in the pkt_reason field.
1158 1158 */
1159 1159 if (cmd_done) {
1160 1160 pkt->pkt_reason = CMD_CMPLT;
1161 1161 pkt->pkt_scbp[0] = STATUS_GOOD;
1162 1162 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1163 1163 | STATE_SENT_CMD;
1164 1164 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1165 1165 (*pkt->pkt_comp)(pkt);
1166 1166 }
1167 1167
1168 1168 return (TRAN_ACCEPT);
1169 1169 }
1170 1170
1171 1171 if (cmd == NULL) {
1172 1172 return (TRAN_BUSY);
1173 1173 }
1174 1174
1175 1175
1176 1176 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1177 1177 if (instance->fw_outstanding > instance->max_fw_cmds) {
1178 1178 dev_err(instance->dip, CE_WARN,
1179 1179 "Command Queue Full... Returning BUSY");
1180 1180 DTRACE_PROBE2(tbolt_start_tran_err,
1181 1181 uint16_t, instance->fw_outstanding,
1182 1182 uint16_t, instance->max_fw_cmds);
1183 1183 return_raid_msg_pkt(instance, cmd);
1184 1184 return (TRAN_BUSY);
1185 1185 }
1186 1186
1187 1187 /* Synchronize the Cmd frame for the controller */
1188 1188 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1189 1189 DDI_DMA_SYNC_FORDEV);
1190 1190
1191 1191 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1192 1192 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1193 1193 cmd->index, cmd->SMID));
1194 1194
1195 1195 instance->func_ptr->issue_cmd(cmd, instance);
1196 1196 } else {
1197 1197 instance->func_ptr->issue_cmd(cmd, instance);
1198 1198 (void) wait_for_outstanding_poll_io(instance);
1199 1199 (void) mrsas_common_check(instance, cmd);
1200 1200 DTRACE_PROBE2(tbolt_start_nointr_done,
1201 1201 uint8_t, cmd->frame->hdr.cmd,
1202 1202 uint8_t, cmd->frame->hdr.cmd_status);
1203 1203 }
1204 1204
1205 1205 return (TRAN_ACCEPT);
1206 1206 }
1207 1207
1208 1208 /*
1209 1209 * prepare the pkt:
1210 1210 * the pkt may have been resubmitted or just reused so
1211 1211 * initialize some fields and do some checks.
1212 1212 */
1213 1213 static int
1214 1214 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1215 1215 {
1216 1216 struct scsi_pkt *pkt = CMD2PKT(acmd);
1217 1217
1218 1218
1219 1219 /*
1220 1220 * Reinitialize some fields that need it; the packet may
1221 1221 * have been resubmitted
1222 1222 */
1223 1223 pkt->pkt_reason = CMD_CMPLT;
1224 1224 pkt->pkt_state = 0;
1225 1225 pkt->pkt_statistics = 0;
1226 1226 pkt->pkt_resid = 0;
1227 1227
1228 1228 /*
1229 1229 * zero status byte.
1230 1230 */
1231 1231 *(pkt->pkt_scbp) = 0;
1232 1232
1233 1233 return (0);
1234 1234 }
1235 1235
1236 1236
1237 1237 int
1238 1238 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1239 1239 struct scsa_cmd *acmd,
1240 1240 struct mrsas_cmd *cmd,
1241 1241 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1242 1242 uint32_t *datalen)
1243 1243 {
1244 1244 uint32_t MaxSGEs;
1245 1245 int sg_to_process;
1246 1246 uint32_t i, j;
1247 1247 uint32_t numElements, endElement;
1248 1248 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1249 1249 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1250 1250 ddi_acc_handle_t acc_handle =
1251 1251 instance->mpi2_frame_pool_dma_obj.acc_handle;
1252 1252
1253 1253 con_log(CL_ANN1, (CE_NOTE,
1254 1254 "chkpnt: Building Chained SGL :%d", __LINE__));
1255 1255
1256 1256 /* Calculate SGE size in number of Words (32bit) */
1257 1257 /* Clear the datalen before updating it. */
1258 1258 *datalen = 0;
1259 1259
1260 1260 MaxSGEs = instance->max_sge_in_main_msg;
1261 1261
1262 1262 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1263 1263 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1264 1264
1265 1265 /* set data transfer flag. */
1266 1266 if (acmd->cmd_flags & CFLAG_DMASEND) {
1267 1267 ddi_put32(acc_handle, &scsi_raid_io->Control,
1268 1268 MPI2_SCSIIO_CONTROL_WRITE);
1269 1269 } else {
1270 1270 ddi_put32(acc_handle, &scsi_raid_io->Control,
1271 1271 MPI2_SCSIIO_CONTROL_READ);
1272 1272 }
1273 1273
1274 1274
1275 1275 numElements = acmd->cmd_cookiecnt;
1276 1276
1277 1277 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1278 1278
1279 1279 if (numElements > instance->max_num_sge) {
1280 1280 con_log(CL_ANN, (CE_NOTE,
1281 1281 "[Max SGE Count Exceeded]:%x", numElements));
1282 1282 return (numElements);
1283 1283 }
1284 1284
1285 1285 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1286 1286 (uint8_t)numElements);
1287 1287
1288 1288 /* set end element in main message frame */
1289 1289 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1290 1290
1291 1291 /* prepare the scatter-gather list for the firmware */
1292 1292 scsi_raid_io_sgl_ieee =
1293 1293 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1294 1294
1295 1295 if (instance->gen3) {
1296 1296 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1297 1297 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1298 1298
1299 1299 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1300 1300 }
1301 1301
1302 1302 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1303 1303 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1304 1304 acmd->cmd_dmacookies[i].dmac_laddress);
1305 1305
1306 1306 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1307 1307 acmd->cmd_dmacookies[i].dmac_size);
1308 1308
1309 1309 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1310 1310
1311 1311 if (instance->gen3) {
1312 1312 if (i == (numElements - 1)) {
1313 1313 ddi_put8(acc_handle,
1314 1314 &scsi_raid_io_sgl_ieee->Flags,
1315 1315 IEEE_SGE_FLAGS_END_OF_LIST);
1316 1316 }
1317 1317 }
1318 1318
1319 1319 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1320 1320
1321 1321 #ifdef DEBUG
1322 1322 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1323 1323 scsi_raid_io_sgl_ieee->Address));
1324 1324 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1325 1325 scsi_raid_io_sgl_ieee->Length));
1326 1326 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1327 1327 scsi_raid_io_sgl_ieee->Flags));
1328 1328 #endif
1329 1329
1330 1330 }
1331 1331
1332 1332 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1333 1333
1334 1334 /* check if chained SGL required */
1335 1335 if (i < numElements) {
1336 1336
1337 1337 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1338 1338
1339 1339 if (instance->gen3) {
1340 1340 uint16_t ioFlags =
1341 1341 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1342 1342
1343 1343 if ((ioFlags &
1344 1344 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1345 1345 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1346 1346 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1347 1347 (U8)instance->chain_offset_io_req);
1348 1348 } else {
1349 1349 ddi_put8(acc_handle,
1350 1350 &scsi_raid_io->ChainOffset, 0);
1351 1351 }
1352 1352 } else {
1353 1353 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1354 1354 (U8)instance->chain_offset_io_req);
1355 1355 }
1356 1356
1357 1357 /* prepare physical chain element */
1358 1358 ieeeChainElement = scsi_raid_io_sgl_ieee;
1359 1359
1360 1360 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1361 1361
1362 1362 if (instance->gen3) {
1363 1363 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1364 1364 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1365 1365 } else {
1366 1366 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1367 1367 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1368 1368 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1369 1369 }
1370 1370
1371 1371 ddi_put32(acc_handle, &ieeeChainElement->Length,
1372 1372 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1373 1373
1374 1374 ddi_put64(acc_handle, &ieeeChainElement->Address,
1375 1375 (U64)cmd->sgl_phys_addr);
1376 1376
1377 1377 sg_to_process = numElements - i;
1378 1378
1379 1379 con_log(CL_ANN1, (CE_NOTE,
1380 1380 "[Additional SGE Count]:%x", endElement));
1381 1381
1382 1382 /* point to the chained SGL buffer */
1383 1383 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1384 1384
1385 1385 /* build rest of the SGL in chained buffer */
1386 1386 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1387 1387 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1388 1388
1389 1389 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1390 1390 acmd->cmd_dmacookies[i].dmac_laddress);
1391 1391
1392 1392 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1393 1393 acmd->cmd_dmacookies[i].dmac_size);
1394 1394
1395 1395 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1396 1396
1397 1397 if (instance->gen3) {
1398 1398 if (i == (numElements - 1)) {
1399 1399 ddi_put8(acc_handle,
1400 1400 &scsi_raid_io_sgl_ieee->Flags,
1401 1401 IEEE_SGE_FLAGS_END_OF_LIST);
1402 1402 }
1403 1403 }
1404 1404
1405 1405 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406 1406
1407 1407 #ifdef DEBUG
1408 1408 con_log(CL_DLEVEL1, (CE_NOTE,
1409 1409 "[SGL Address]: %" PRIx64,
1410 1410 scsi_raid_io_sgl_ieee->Address));
1411 1411 con_log(CL_DLEVEL1, (CE_NOTE,
1412 1412 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413 1413 con_log(CL_DLEVEL1, (CE_NOTE,
1414 1414 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 1415 #endif
1416 1416
1417 1417 i++;
1418 1418 }
1419 1419 }
1420 1420
1421 1421 return (0);
1422 1422 } /* end of BuildScatterGather */
1423 1423
1424 1424
1425 1425 /*
1426 1426 * build_cmd
1427 1427 */
1428 1428 static struct mrsas_cmd *
1429 1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430 1430 struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 1431 {
1432 1432 uint8_t fp_possible = 0;
1433 1433 uint32_t index;
1434 1434 uint32_t lba_count = 0;
1435 1435 uint32_t start_lba_hi = 0;
1436 1436 uint32_t start_lba_lo = 0;
1437 1437 ddi_acc_handle_t acc_handle =
1438 1438 instance->mpi2_frame_pool_dma_obj.acc_handle;
1439 1439 struct mrsas_cmd *cmd = NULL;
1440 1440 struct scsa_cmd *acmd = PKT2CMD(pkt);
1441 1441 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1442 1442 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1443 1443 uint32_t datalen;
1444 1444 struct IO_REQUEST_INFO io_info;
1445 1445 MR_FW_RAID_MAP_ALL *local_map_ptr;
1446 1446 uint16_t pd_cmd_cdblen;
1447 1447
1448 1448 con_log(CL_DLEVEL1, (CE_NOTE,
1449 1449 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1450 1450
1451 1451 /* find out if this is logical or physical drive command. */
1452 1452 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1453 1453 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1454 1454
1455 1455 *cmd_done = 0;
1456 1456
1457 1457 /* get the command packet */
1458 1458 if (!(cmd = get_raid_msg_pkt(instance))) {
1459 1459 DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1460 1460 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1461 1461 return (NULL);
1462 1462 }
1463 1463
1464 1464 index = cmd->index;
1465 1465 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1466 1466 ReqDescUnion->Words = 0;
1467 1467 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1468 1468 ReqDescUnion->SCSIIO.RequestFlags =
1469 1469 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1470 1470 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1471 1471
1472 1472
1473 1473 cmd->request_desc = ReqDescUnion;
1474 1474 cmd->pkt = pkt;
1475 1475 cmd->cmd = acmd;
1476 1476
1477 1477 DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1478 1478 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1479 1479 uint16_t, acmd->device_id);
1480 1480
1481 1481 /* lets get the command directions */
1482 1482 if (acmd->cmd_flags & CFLAG_DMASEND) {
1483 1483 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1484 1484 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1485 1485 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1486 1486 DDI_DMA_SYNC_FORDEV);
1487 1487 }
1488 1488 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1489 1489 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1490 1490 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1491 1491 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1492 1492 DDI_DMA_SYNC_FORCPU);
1493 1493 }
1494 1494 } else {
1495 1495 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1496 1496 }
1497 1497
1498 1498
1499 1499 /* get SCSI_IO raid message frame pointer */
1500 1500 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1501 1501
1502 1502 /* zero out SCSI_IO raid message frame */
1503 1503 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1504 1504
1505 1505 /* Set the ldTargetId set by BuildRaidContext() */
1506 1506 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1507 1507 acmd->device_id);
1508 1508
1509 1509 /* Copy CDB to scsi_io_request message frame */
1510 1510 ddi_rep_put8(acc_handle,
1511 1511 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1512 1512 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1513 1513
1514 1514 /*
1515 1515 * Just the CDB length, rest of the Flags are zero
1516 1516 * This will be modified later.
1517 1517 */
1518 1518 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1519 1519
1520 1520 pd_cmd_cdblen = acmd->cmd_cdblen;
1521 1521
1522 1522 if (acmd->islogical) {
1523 1523
1524 1524 switch (pkt->pkt_cdbp[0]) {
1525 1525 case SCMD_READ:
1526 1526 case SCMD_WRITE:
1527 1527 case SCMD_READ_G1:
1528 1528 case SCMD_WRITE_G1:
1529 1529 case SCMD_READ_G4:
1530 1530 case SCMD_WRITE_G4:
1531 1531 case SCMD_READ_G5:
1532 1532 case SCMD_WRITE_G5:
1533 1533
1534 1534 /* Initialize sense Information */
1535 1535 if (cmd->sense1 == NULL) {
1536 1536 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1537 1537 "Sense buffer ptr NULL "));
1538 1538 }
1539 1539 bzero(cmd->sense1, SENSE_LENGTH);
1540 1540 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1541 1541 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1542 1542
1543 1543 if (acmd->cmd_cdblen == CDB_GROUP0) {
1544 1544 /* 6-byte cdb */
1545 1545 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1546 1546 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1547 1547 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1548 1548 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1549 1549 << 16));
1550 1550 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1551 1551 /* 10-byte cdb */
1552 1552 lba_count =
1553 1553 (((uint16_t)(pkt->pkt_cdbp[8])) |
1554 1554 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1555 1555
1556 1556 start_lba_lo =
1557 1557 (((uint32_t)(pkt->pkt_cdbp[5])) |
1558 1558 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1559 1559 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1560 1560 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1561 1561
1562 1562 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1563 1563 /* 12-byte cdb */
1564 1564 lba_count = (
1565 1565 ((uint32_t)(pkt->pkt_cdbp[9])) |
1566 1566 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1567 1567 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1568 1568 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1569 1569
1570 1570 start_lba_lo =
1571 1571 (((uint32_t)(pkt->pkt_cdbp[5])) |
1572 1572 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1573 1573 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1574 1574 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1575 1575
1576 1576 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1577 1577 /* 16-byte cdb */
1578 1578 lba_count = (
1579 1579 ((uint32_t)(pkt->pkt_cdbp[13])) |
1580 1580 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581 1581 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582 1582 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583 1583
1584 1584 start_lba_lo = (
1585 1585 ((uint32_t)(pkt->pkt_cdbp[9])) |
1586 1586 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587 1587 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588 1588 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589 1589
1590 1590 start_lba_hi = (
1591 1591 ((uint32_t)(pkt->pkt_cdbp[5])) |
1592 1592 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593 1593 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594 1594 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595 1595 }
1596 1596
1597 1597 if (instance->tbolt &&
1598 1598 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1599 1599 dev_err(instance->dip, CE_WARN,
1600 1600 				    "I/O sector count 0x%x exceeds "
1601 1601 				    "controller limit", lba_count);
1603 1603 }
1604 1604
1605 1605 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1606 1606 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1607 1607 start_lba_lo;
1608 1608 io_info.numBlocks = lba_count;
1609 1609 io_info.ldTgtId = acmd->device_id;
1610 1610
1611 1611 if (acmd->cmd_flags & CFLAG_DMASEND)
1612 1612 io_info.isRead = 0;
1613 1613 else
1614 1614 io_info.isRead = 1;
1615 1615
1616 1616
1617 1617 /* Acquire SYNC MAP UPDATE lock */
1618 1618 mutex_enter(&instance->sync_map_mtx);
1619 1619
1620 1620 local_map_ptr =
1621 1621 instance->ld_map[(instance->map_id & 1)];
1622 1622
1623 1623 if ((MR_TargetIdToLdGet(
1624 1624 acmd->device_id, local_map_ptr) >=
1625 1625 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1626 1626 dev_err(instance->dip, CE_NOTE,
1627 1627 "Fast Path NOT Possible, "
1628 1628 "targetId >= MAX_LOGICAL_DRIVES || "
1629 1629 "!instance->fast_path_io");
1630 1630 fp_possible = 0;
1631 1631 /* Set Regionlock flags to BYPASS */
1632 1632 /* io_request->RaidContext.regLockFlags = 0; */
1633 1633 ddi_put8(acc_handle,
1634 1634 &scsi_raid_io->RaidContext.regLockFlags, 0);
1635 1635 } else {
1636 1636 if (MR_BuildRaidContext(instance, &io_info,
1637 1637 &scsi_raid_io->RaidContext, local_map_ptr))
1638 1638 fp_possible = io_info.fpOkForIo;
1639 1639 }
1640 1640
1641 1641 if (!enable_fp)
1642 1642 fp_possible = 0;
1643 1643
1644 1644 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1645 1645 "instance->fast_path_io %d fp_possible %d",
1646 1646 enable_fp, instance->fast_path_io, fp_possible));
1647 1647
1648 1648 if (fp_possible) {
1649 1649
1650 1650 /* Check for DIF enabled LD */
1651 1651 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1652 1652 /* Prepare 32 Byte CDB for DIF capable Disk */
1653 1653 mrsas_tbolt_prepare_cdb(instance,
1654 1654 scsi_raid_io->CDB.CDB32,
1655 1655 &io_info, scsi_raid_io, start_lba_lo);
1656 1656 } else {
1657 1657 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1658 1658 sizeof (scsi_raid_io->CDB.CDB32),
1659 1659 (uint8_t *)&pd_cmd_cdblen,
1660 1660 io_info.pdBlock, io_info.numBlocks);
1661 1661 ddi_put16(acc_handle,
1662 1662 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1663 1663 }
1664 1664
1665 1665 ddi_put8(acc_handle, &scsi_raid_io->Function,
1666 1666 MPI2_FUNCTION_SCSI_IO_REQUEST);
1667 1667
1668 1668 ReqDescUnion->SCSIIO.RequestFlags =
1669 1669 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1670 1670 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1671 1671
1672 1672 if (instance->gen3) {
1673 1673 uint8_t regLockFlags = ddi_get8(acc_handle,
1674 1674 &scsi_raid_io->RaidContext.regLockFlags);
1675 1675 uint16_t IoFlags = ddi_get16(acc_handle,
1676 1676 &scsi_raid_io->IoFlags);
1677 1677
1678 1678 if (regLockFlags == REGION_TYPE_UNUSED)
1679 1679 ReqDescUnion->SCSIIO.RequestFlags =
1680 1680 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1681 1681 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1682 1682
1683 1683 IoFlags |=
1684 1684 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1685 1685 regLockFlags |=
1686 1686 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1687 1687 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1688 1688
1689 1689 ddi_put8(acc_handle,
1690 1690 &scsi_raid_io->ChainOffset, 0);
1691 1691 ddi_put8(acc_handle,
1692 1692 &scsi_raid_io->RaidContext.nsegType,
1693 1693 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1694 1694 MPI2_TYPE_CUDA));
1695 1695 ddi_put8(acc_handle,
1696 1696 &scsi_raid_io->RaidContext.regLockFlags,
1697 1697 regLockFlags);
1698 1698 ddi_put16(acc_handle,
1699 1699 &scsi_raid_io->IoFlags, IoFlags);
1700 1700 }
1701 1701
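			/*
			 * For reads to a load-balanced (RAID-1) LD, pick
			 * the preferred arm for this I/O and tag the
			 * command so that completion can decrement that
			 * arm's pending-command count.
			 */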
1702 1702 if ((instance->load_balance_info[
1703 1703 acmd->device_id].loadBalanceFlag) &&
1704 1704 (io_info.isRead)) {
1705 1705 io_info.devHandle =
1706 1706 get_updated_dev_handle(&instance->
1707 1707 load_balance_info[acmd->device_id],
1708 1708 &io_info);
1709 1709 cmd->load_balance_flag |=
1710 1710 MEGASAS_LOAD_BALANCE_FLAG;
1711 1711 } else {
1712 1712 cmd->load_balance_flag &=
1713 1713 ~MEGASAS_LOAD_BALANCE_FLAG;
1714 1714 }
1715 1715
1716 1716 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1717 1717 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1718 1718 io_info.devHandle);
1719 1719
1720 1720 } else { /* FP Not Possible */
1721 1721
1722 1722 ddi_put8(acc_handle, &scsi_raid_io->Function,
1723 1723 MPI2_FUNCTION_LD_IO_REQUEST);
1724 1724
1725 1725 ddi_put16(acc_handle,
1726 1726 &scsi_raid_io->DevHandle, acmd->device_id);
1727 1727
1728 1728 ReqDescUnion->SCSIIO.RequestFlags =
1729 1729 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1730 1730 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1731 1731
1732 1732 ddi_put16(acc_handle,
1733 1733 &scsi_raid_io->RaidContext.timeoutValue,
1734 1734 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1735 1735
1736 1736 if (instance->gen3) {
1737 1737 uint8_t regLockFlags = ddi_get8(acc_handle,
1738 1738 &scsi_raid_io->RaidContext.regLockFlags);
1739 1739
1740 1740 if (regLockFlags == REGION_TYPE_UNUSED) {
1741 1741 ReqDescUnion->SCSIIO.RequestFlags =
1742 1742 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1743 1743 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1744 1744 }
1745 1745
1746 1746 regLockFlags |=
1747 1747 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1748 1748 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1749 1749
1750 1750 ddi_put8(acc_handle,
1751 1751 &scsi_raid_io->RaidContext.nsegType,
1752 1752 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1753 1753 MPI2_TYPE_CUDA));
1754 1754 ddi_put8(acc_handle,
1755 1755 &scsi_raid_io->RaidContext.regLockFlags,
1756 1756 regLockFlags);
1757 1757 }
1758 1758 } /* Not FP */
1759 1759
1760 1760 /* Release SYNC MAP UPDATE lock */
1761 1761 mutex_exit(&instance->sync_map_mtx);
1762 1762
1763 1763 break;
1764 1764
1765 1765 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1766 1766 return_raid_msg_pkt(instance, cmd);
1767 1767 *cmd_done = 1;
1768 1768 return (NULL);
1769 1769 }
1770 1770
1771 1771 case SCMD_MODE_SENSE:
1772 1772 case SCMD_MODE_SENSE_G1: {
1773 1773 union scsi_cdb *cdbp;
1774 1774 uint16_t page_code;
1775 1775
1776 1776 cdbp = (void *)pkt->pkt_cdbp;
1777 1777 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778 1778 switch (page_code) {
1779 1779 case 0x3:
1780 1780 case 0x4:
1781 1781 (void) mrsas_mode_sense_build(pkt);
1782 1782 return_raid_msg_pkt(instance, cmd);
1783 1783 *cmd_done = 1;
1784 1784 return (NULL);
1785 1785 }
1786 1786 return (cmd);
1787 1787 }
1788 1788
1789 1789 default:
1790 1790 /* Pass-through command to logical drive */
1791 1791 ddi_put8(acc_handle, &scsi_raid_io->Function,
1792 1792 MPI2_FUNCTION_LD_IO_REQUEST);
1793 1793 ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1794 1794 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1795 1795 acmd->device_id);
1796 1796 ReqDescUnion->SCSIIO.RequestFlags =
1797 1797 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1798 1798 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1799 1799 break;
1800 1800 }
1801 1801 } else { /* Physical */
1802 1802 /* Pass-through command to physical drive */
1803 1803
1804 1804 /* Acquire SYNC MAP UPDATE lock */
1805 1805 mutex_enter(&instance->sync_map_mtx);
1806 1806
1807 1807 local_map_ptr = instance->ld_map[instance->map_id & 1];
1808 1808
1809 1809 ddi_put8(acc_handle, &scsi_raid_io->Function,
1810 1810 MPI2_FUNCTION_SCSI_IO_REQUEST);
1811 1811
1812 1812 ReqDescUnion->SCSIIO.RequestFlags =
1813 1813 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1814 1814 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1815 1815
1816 1816 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1817 1817 local_map_ptr->raidMap.
1818 1818 devHndlInfo[acmd->device_id].curDevHdl);
1819 1819
1820 1820 		/* Set regLockFlags to REGION_TYPE_BYPASS */
1821 1821 ddi_put8(acc_handle,
1822 1822 &scsi_raid_io->RaidContext.regLockFlags, 0);
1823 1823 ddi_put64(acc_handle,
1824 1824 &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1825 1825 ddi_put32(acc_handle,
1826 1826 &scsi_raid_io->RaidContext.regLockLength, 0);
1827 1827 ddi_put8(acc_handle,
1828 1828 &scsi_raid_io->RaidContext.RAIDFlags,
1829 1829 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1830 1830 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1831 1831 ddi_put16(acc_handle,
1832 1832 &scsi_raid_io->RaidContext.timeoutValue,
1833 1833 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1834 1834 ddi_put16(acc_handle,
1835 1835 &scsi_raid_io->RaidContext.ldTargetId,
1836 1836 acmd->device_id);
1837 1837 ddi_put8(acc_handle,
1838 1838 &scsi_raid_io->LUN[1], acmd->lun);
1839 1839
1840 1840 if (instance->fast_path_io && instance->gen3) {
1841 1841 uint16_t IoFlags = ddi_get16(acc_handle,
1842 1842 &scsi_raid_io->IoFlags);
1843 1843 IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1844 1844 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1845 1845 }
1846 1846 ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1847 1847 local_map_ptr->raidMap.
1848 1848 devHndlInfo[acmd->device_id].curDevHdl);
1849 1849
1850 1850 /* Release SYNC MAP UPDATE lock */
1851 1851 mutex_exit(&instance->sync_map_mtx);
1852 1852 }
1853 1853
1854 1854 /* Set sense buffer physical address/length in scsi_io_request. */
1855 1855 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1856 1856 cmd->sense_phys_addr1);
1857 1857 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1858 1858
1859 1859 /* Construct SGL */
1860 1860 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1861 1861 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1862 1862
1863 1863 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1864 1864 scsi_raid_io, &datalen);
1865 1865
1866 1866 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1867 1867
1868 1868 con_log(CL_ANN, (CE_CONT,
1869 1869 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1870 1870 pkt->pkt_cdbp[0], acmd->device_id));
1871 1871 con_log(CL_DLEVEL1, (CE_CONT,
1872 1872 "data length = %x\n",
1873 1873 scsi_raid_io->DataLength));
1874 1874 con_log(CL_DLEVEL1, (CE_CONT,
1875 1875 "cdb length = %x\n",
1876 1876 acmd->cmd_cdblen));
1877 1877
1878 1878 return (cmd);
1879 1879 }
1880 1880
1881 1881 uint32_t
1882 1882 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1883 1883 {
1884 1884 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1885 1885 }
1886 1886
1887 1887 void
1888 1888 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1889 1889 {
1890 1890 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1891 1891 atomic_inc_16(&instance->fw_outstanding);
1892 1892
1893 1893 struct scsi_pkt *pkt;
1894 1894
1895 1895 con_log(CL_ANN1,
1896 1896 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1897 1897
1898 1898 con_log(CL_DLEVEL1, (CE_CONT,
1899 1899 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1900 1900 con_log(CL_DLEVEL1, (CE_CONT,
1901 1901 " [req desc low part] %x \n",
1902 1902 	    (uint_t)(req_desc->Words & 0xffffffff)));
1903 1903 con_log(CL_DLEVEL1, (CE_CONT,
1904 1904 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1905 1905 pkt = cmd->pkt;
1906 1906
1907 1907 if (pkt) {
1908 1908 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1909 1909 		    "issued cmd to FW: cmd: %p instance: %p pkt: %p "
1910 1910 		    "pkt_time: %x\n",
1911 1911 gethrtime(), (void *)cmd, (void *)instance,
1912 1912 (void *)pkt, cmd->drv_pkt_time));
1913 1913 if (instance->adapterresetinprogress) {
1914 1914 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1915 1915 con_log(CL_ANN, (CE_NOTE,
1916 1916 "TBOLT Reset the scsi_pkt timer"));
1917 1917 } else {
1918 1918 push_pending_mfi_pkt(instance, cmd);
1919 1919 }
1920 1920
1921 1921 } else {
1922 1922 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1923 1923 		    "issued cmd to FW: cmd: %p, instance: %p "
1924 1924 		    "(no pkt)\n", gethrtime(), (void *)cmd, (void *)instance));
1925 1925 }
1926 1926
1927 1927 /* Issue the command to the FW */
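	/*
	 * The 64-bit request descriptor is written as two 32-bit MMIO
	 * writes, low word first; reg_write_mtx keeps the pair atomic
	 * with respect to other threads issuing commands.
	 */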
1928 1928 mutex_enter(&instance->reg_write_mtx);
1929 1929 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1930 1930 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1931 1931 mutex_exit(&instance->reg_write_mtx);
1932 1932 }
1933 1933
1934 1934 /*
1935 1935 * issue_cmd_in_sync_mode
1936 1936 */
1937 1937 int
1938 1938 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1939 1939 struct mrsas_cmd *cmd)
1940 1940 {
1941 1941 int i;
1942 1942 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1943 1943 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1944 1944
1945 1945 struct mrsas_header *hdr;
1946 1946 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1947 1947
1948 1948 con_log(CL_ANN,
1949 1949 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1950 1950 cmd->SMID));
1951 1951
1952 1952
1953 1953 if (instance->adapterresetinprogress) {
1954 1954 cmd->drv_pkt_time = ddi_get16
1955 1955 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1956 1956 if (cmd->drv_pkt_time < debug_timeout_g)
1957 1957 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1958 1958 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1959 1959 "RESET-IN-PROGRESS, issue cmd & return."));
1960 1960
1961 1961 mutex_enter(&instance->reg_write_mtx);
1962 1962 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1963 1963 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1964 1964 mutex_exit(&instance->reg_write_mtx);
1965 1965
1966 1966 return (DDI_SUCCESS);
1967 1967 } else {
1968 1968 con_log(CL_ANN1, (CE_NOTE,
1969 1969 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1970 1970 push_pending_mfi_pkt(instance, cmd);
1971 1971 }
1972 1972
1973 1973 con_log(CL_DLEVEL2, (CE_NOTE,
1974 1974 "HighQport offset :%p",
1975 1975 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1976 1976 con_log(CL_DLEVEL2, (CE_NOTE,
1977 1977 "LowQport offset :%p",
1978 1978 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1979 1979
1980 1980 cmd->sync_cmd = MRSAS_TRUE;
1981 1981 cmd->cmd_status = ENODATA;
1982 1982
1983 1983
1984 1984 mutex_enter(&instance->reg_write_mtx);
1985 1985 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1986 1986 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1987 1987 mutex_exit(&instance->reg_write_mtx);
1988 1988
1989 1989 con_log(CL_ANN1, (CE_NOTE,
1990 1990 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1991 1991 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1992 1992 (uint_t)(req_desc->Words & 0xffffffff)));
1993 1993
1994 1994 mutex_enter(&instance->int_cmd_mtx);
1995 1995 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1996 1996 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1997 1997 }
1998 1998 mutex_exit(&instance->int_cmd_mtx);
1999 1999
2000 2000
2001 2001 	if (i < (msecs - 1)) {
2002 2002 return (DDI_SUCCESS);
2003 2003 } else {
2004 2004 return (DDI_FAILURE);
2005 2005 }
2006 2006 }
2007 2007
2008 2008 /*
2009 2009 * issue_cmd_in_poll_mode
2010 2010 */
2011 2011 int
2012 2012 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2013 2013 struct mrsas_cmd *cmd)
2014 2014 {
2015 2015 int i;
2016 2016 uint16_t flags;
2017 2017 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2018 2018 struct mrsas_header *frame_hdr;
2019 2019
2020 2020 con_log(CL_ANN,
2021 2021 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2022 2022 cmd->SMID));
2023 2023
2024 2024 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2025 2025
2026 2026 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2027 2027 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2028 2028 MFI_CMD_STATUS_POLL_MODE);
2029 2029 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2030 2030 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2031 2031 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2032 2032
2033 2033 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2034 2034 (uint_t)(req_desc->Words & 0xffffffff)));
2035 2035 con_log(CL_ANN1, (CE_NOTE,
2036 2036 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2037 2037
2038 2038 /* issue the frame using inbound queue port */
2039 2039 mutex_enter(&instance->reg_write_mtx);
2040 2040 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2041 2041 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2042 2042 mutex_exit(&instance->reg_write_mtx);
2043 2043
2044 2044 for (i = 0; i < msecs && (
2045 2045 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2046 2046 == MFI_CMD_STATUS_POLL_MODE); i++) {
2047 2047 /* wait for cmd_status to change from 0xFF */
2048 2048 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2049 2049 }
2050 2050
2051 2051 DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2052 2052
2053 2053 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2054 2054 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2055 2055 con_log(CL_ANN1, (CE_NOTE,
2056 2056 " cmd failed %" PRIx64, (req_desc->Words)));
2057 2057 return (DDI_FAILURE);
2058 2058 }
2059 2059
2060 2060 return (DDI_SUCCESS);
2061 2061 }
2062 2062
2063 2063 void
2064 2064 tbolt_enable_intr(struct mrsas_instance *instance)
2065 2065 {
2066 2066 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2067 2067 	/* writel(~0, &regs->outbound_intr_status); */
2068 2068 	/* readl(&regs->outbound_intr_status); */
2069 2069
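	/*
	 * Writing a 1 to a bit in the outbound interrupt mask register
	 * masks that interrupt, so writing the complement of the enable
	 * mask unmasks only the FW reply interrupt.
	 */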
2070 2070 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2071 2071
2072 2072 /* dummy read to force PCI flush */
2073 2073 (void) RD_OB_INTR_MASK(instance);
2074 2074
2075 2075 }
2076 2076
2077 2077 void
2078 2078 tbolt_disable_intr(struct mrsas_instance *instance)
2079 2079 {
2080 2080 uint32_t mask = 0xFFFFFFFF;
2081 2081
2082 2082 WR_OB_INTR_MASK(mask, instance);
2083 2083
2084 2084 /* Dummy readl to force pci flush */
2085 2085
2086 2086 (void) RD_OB_INTR_MASK(instance);
2087 2087 }
2088 2088
2089 2089
2090 2090 int
2091 2091 tbolt_intr_ack(struct mrsas_instance *instance)
2092 2092 {
2093 2093 uint32_t status;
2094 2094
2095 2095 /* check if it is our interrupt */
2096 2096 status = RD_OB_INTR_STATUS(instance);
2097 2097 con_log(CL_ANN1, (CE_NOTE,
2098 2098 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2099 2099
2100 2100 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2101 2101 return (DDI_INTR_UNCLAIMED);
2102 2102 }
2103 2103
2104 2104 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2105 2105 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2106 2106 return (DDI_INTR_UNCLAIMED);
2107 2107 }
2108 2108
2109 2109 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2110 2110 /* clear the interrupt by writing back the same value */
2111 2111 WR_OB_INTR_STATUS(status, instance);
2112 2112 /* dummy READ */
2113 2113 (void) RD_OB_INTR_STATUS(instance);
2114 2114 }
2115 2115 return (DDI_INTR_CLAIMED);
2116 2116 }
2117 2117
2118 2118 /*
2119 2119 * get_raid_msg_pkt : Get a command from the free pool
2120 2120 * After successful allocation, the caller of this routine
2121 2121 * must clear the frame buffer (memset to zero) before
2122 2122 * using the packet further.
2123 2123 *
2124 2124 * ***** Note *****
2125 2125  * After clearing the frame buffer, the context id of the
2126 2126  * frame buffer SHOULD be restored.
2127 2127 */
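/*
 * Illustrative only (hypothetical local names): a typical caller preserves
 * the frame context across the clear, e.g.
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	if (cmd != NULL) {
 *		context = ddi_get32(acc_handle, &cmd->frame->io.context);
 *		bzero(cmd->frame, sizeof (union mrsas_frame));
 *		ddi_put32(acc_handle, &cmd->frame->io.context, context);
 *	}
 */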
2128 2128
2129 2129 struct mrsas_cmd *
2130 2130 get_raid_msg_pkt(struct mrsas_instance *instance)
2131 2131 {
2132 2132 mlist_t *head = &instance->cmd_pool_list;
2133 2133 struct mrsas_cmd *cmd = NULL;
2134 2134
2135 2135 mutex_enter(&instance->cmd_pool_mtx);
2136 2136 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2137 2137
2138 2138
2139 2139 if (!mlist_empty(head)) {
2140 2140 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2141 2141 mlist_del_init(head->next);
2142 2142 }
2143 2143 if (cmd != NULL) {
2144 2144 cmd->pkt = NULL;
2145 2145 cmd->retry_count_for_ocr = 0;
2146 2146 cmd->drv_pkt_time = 0;
2147 2147 }
2148 2148 mutex_exit(&instance->cmd_pool_mtx);
2149 2149
2150 2150 if (cmd != NULL)
2151 2151 bzero(cmd->scsi_io_request,
2152 2152 sizeof (Mpi2RaidSCSIIORequest_t));
2153 2153 return (cmd);
2154 2154 }
2155 2155
2156 2156 struct mrsas_cmd *
2157 2157 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2158 2158 {
2159 2159 mlist_t *head = &instance->cmd_app_pool_list;
2160 2160 struct mrsas_cmd *cmd = NULL;
2161 2161
2162 2162 mutex_enter(&instance->cmd_app_pool_mtx);
2163 2163 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2164 2164
2165 2165 if (!mlist_empty(head)) {
2166 2166 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2167 2167 mlist_del_init(head->next);
2168 2168 }
2169 2169 if (cmd != NULL) {
2170 2170 cmd->retry_count_for_ocr = 0;
2171 2171 cmd->drv_pkt_time = 0;
2172 2172 cmd->pkt = NULL;
2173 2173 cmd->request_desc = NULL;
2174 2174
2175 2175 }
2176 2176
2177 2177 mutex_exit(&instance->cmd_app_pool_mtx);
2178 2178
2179 2179 if (cmd != NULL) {
2180 2180 bzero(cmd->scsi_io_request,
2181 2181 sizeof (Mpi2RaidSCSIIORequest_t));
2182 2182 }
2183 2183
2184 2184 return (cmd);
2185 2185 }
2186 2186
2187 2187 /*
2188 2188 * return_raid_msg_pkt : Return a cmd to free command pool
2189 2189 */
2190 2190 void
2191 2191 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2192 2192 {
2193 2193 mutex_enter(&instance->cmd_pool_mtx);
2194 2194 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2195 2195
2196 2196
2197 2197 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2198 2198
2199 2199 mutex_exit(&instance->cmd_pool_mtx);
2200 2200 }
2201 2201
2202 2202 void
2203 2203 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2204 2204 {
2205 2205 mutex_enter(&instance->cmd_app_pool_mtx);
2206 2206 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2207 2207
2208 2208 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2209 2209
2210 2210 mutex_exit(&instance->cmd_app_pool_mtx);
2211 2211 }
2212 2212
2213 2213
2214 2214 void
2215 2215 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2216 2216 struct mrsas_cmd *cmd)
2217 2217 {
2218 2218 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2219 2219 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2220 2220 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2221 2221 uint32_t index;
2222 2222 ddi_acc_handle_t acc_handle =
2223 2223 instance->mpi2_frame_pool_dma_obj.acc_handle;
2224 2224
2225 2225 if (!instance->tbolt) {
2226 2226 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2227 2227 return;
2228 2228 }
2229 2229
2230 2230 index = cmd->index;
2231 2231
2232 2232 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2233 2233
2234 2234 if (!ReqDescUnion) {
2235 2235 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2236 2236 return;
2237 2237 }
2238 2238
2239 2239 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2240 2240
2241 2241 ReqDescUnion->Words = 0;
2242 2242
2243 2243 ReqDescUnion->SCSIIO.RequestFlags =
2244 2244 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2245 2245 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2246 2246
2247 2247 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2248 2248
2249 2249 cmd->request_desc = ReqDescUnion;
2250 2250
2251 2251 /* get raid message frame pointer */
2252 2252 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2253 2253
2254 2254 if (instance->gen3) {
2255 2255 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2256 2256 &scsi_raid_io->SGL.IeeeChain;
2257 2257 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2258 2258 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2259 2259 }
2260 2260
2261 2261 ddi_put8(acc_handle, &scsi_raid_io->Function,
2262 2262 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2263 2263
2264 2264 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2265 2265 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2266 2266
2267 2267 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2268 2268 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2269 2269
2270 2270 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2271 2271 cmd->sense_phys_addr1);
2272 2272
2273 2273
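	/*
	 * The legacy MFI frame is handed to the Thunderbolt FW by
	 * wrapping it in an MPI2 pass-through request whose single IEEE
	 * chain element points at the MFI frame's physical address.
	 */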
2274 2274 scsi_raid_io_sgl_ieee =
2275 2275 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2276 2276
2277 2277 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2278 2278 (U64)cmd->frame_phys_addr);
2279 2279
2280 2280 ddi_put8(acc_handle,
2281 2281 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2282 2282 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2283 2283 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2284 2284 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2285 2285
2286 2286 con_log(CL_ANN1, (CE_NOTE,
2287 2287 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2288 2288 scsi_raid_io_sgl_ieee->Address));
2289 2289 con_log(CL_ANN1, (CE_NOTE,
2290 2290 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2291 2291 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2292 2292 scsi_raid_io_sgl_ieee->Flags));
2293 2293 }
2294 2294
2295 2295
2296 2296 void
2297 2297 tbolt_complete_cmd(struct mrsas_instance *instance,
2298 2298 struct mrsas_cmd *cmd)
2299 2299 {
2300 2300 uint8_t status;
2301 2301 uint8_t extStatus;
2302 2302 uint8_t function;
2303 2303 uint8_t arm;
2304 2304 struct scsa_cmd *acmd;
2305 2305 struct scsi_pkt *pkt;
2306 2306 struct scsi_arq_status *arqstat;
2307 2307 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2308 2308 LD_LOAD_BALANCE_INFO *lbinfo;
2309 2309 ddi_acc_handle_t acc_handle =
2310 2310 instance->mpi2_frame_pool_dma_obj.acc_handle;
2311 2311
2312 2312 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2313 2313
2314 2314 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2315 2315 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2316 2316
2317 2317 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2318 2318 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2319 2319
2320 2320 if (status != MFI_STAT_OK) {
2321 2321 con_log(CL_ANN, (CE_WARN,
2322 2322 "IO Cmd Failed SMID %x", cmd->SMID));
2323 2323 } else {
2324 2324 con_log(CL_ANN, (CE_NOTE,
2325 2325 "IO Cmd Success SMID %x", cmd->SMID));
2326 2326 }
2327 2327
2328 2328 /* regular commands */
2329 2329
2330 2330 function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2331 2331 DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2332 2332 uint8_t, status, uint8_t, extStatus);
2333 2333
2334 2334 switch (function) {
2335 2335
2336 2336 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2337 2337 acmd = (struct scsa_cmd *)cmd->cmd;
2338 2338 lbinfo = &instance->load_balance_info[acmd->device_id];
2339 2339
2340 2340 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2341 2341 arm = lbinfo->raid1DevHandle[0] ==
2342 2342 scsi_raid_io->DevHandle ? 0 : 1;
2343 2343
2344 2344 lbinfo->scsi_pending_cmds[arm]--;
2345 2345 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2346 2346 }
2347 2347 con_log(CL_DLEVEL3, (CE_NOTE,
2348 2348 "FastPath IO Completion Success "));
2349 2349 /* FALLTHRU */
2350 2350
2351 2351 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2352 2352 acmd = (struct scsa_cmd *)cmd->cmd;
2353 2353 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2354 2354
2355 2355 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2356 2356 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2357 2357 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2358 2358 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2359 2359 DDI_DMA_SYNC_FORCPU);
2360 2360 }
2361 2361 }
2362 2362
2363 2363 pkt->pkt_reason = CMD_CMPLT;
2364 2364 pkt->pkt_statistics = 0;
2365 2365 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2366 2366 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2367 2367
2368 2368 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2369 2369 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2370 2370 ((acmd->islogical) ? "LD" : "PD"),
2371 2371 acmd->cmd_dmacount, cmd->SMID, status));
2372 2372
2373 2373 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2374 2374 struct scsi_inquiry *inq;
2375 2375
2376 2376 if (acmd->cmd_dmacount != 0) {
2377 2377 bp_mapin(acmd->cmd_buf);
2378 2378 inq = (struct scsi_inquiry *)
2379 2379 acmd->cmd_buf->b_un.b_addr;
2380 2380
2381 2381 /* don't expose physical drives to OS */
2382 2382 if (acmd->islogical &&
2383 2383 (status == MFI_STAT_OK)) {
2384 2384 display_scsi_inquiry((caddr_t)inq);
2385 2385 } else if ((status == MFI_STAT_OK) &&
2386 2386 inq->inq_dtype == DTYPE_DIRECT) {
2387 2387 display_scsi_inquiry((caddr_t)inq);
2388 2388 } else {
2389 2389 /* for physical disk */
2390 2390 status = MFI_STAT_DEVICE_NOT_FOUND;
2391 2391 }
2392 2392 }
2393 2393 }
2394 2394
2395 2395 switch (status) {
2396 2396 case MFI_STAT_OK:
2397 2397 pkt->pkt_scbp[0] = STATUS_GOOD;
2398 2398 break;
2399 2399 case MFI_STAT_LD_CC_IN_PROGRESS:
2400 2400 case MFI_STAT_LD_RECON_IN_PROGRESS:
2401 2401 pkt->pkt_scbp[0] = STATUS_GOOD;
2402 2402 break;
2403 2403 case MFI_STAT_LD_INIT_IN_PROGRESS:
2404 2404 pkt->pkt_reason = CMD_TRAN_ERR;
2405 2405 break;
2406 2406 case MFI_STAT_SCSI_IO_FAILED:
2407 2407 dev_err(instance->dip, CE_WARN,
2408 2408 "tbolt_complete_cmd: scsi_io failed");
2409 2409 pkt->pkt_reason = CMD_TRAN_ERR;
2410 2410 break;
2411 2411 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 2412 con_log(CL_ANN, (CE_WARN,
2413 2413 "tbolt_complete_cmd: scsi_done with error"));
2414 2414
2415 2415 pkt->pkt_reason = CMD_CMPLT;
2416 2416 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2417 2417
2418 2418 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 2419 con_log(CL_ANN,
2420 2420 (CE_WARN, "TEST_UNIT_READY fail"));
2421 2421 } else {
2422 2422 pkt->pkt_state |= STATE_ARQ_DONE;
2423 2423 arqstat = (void *)(pkt->pkt_scbp);
2424 2424 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 2425 arqstat->sts_rqpkt_resid = 0;
2426 2426 arqstat->sts_rqpkt_state |=
2427 2427 STATE_GOT_BUS | STATE_GOT_TARGET
2428 2428 | STATE_SENT_CMD
2429 2429 | STATE_XFERRED_DATA;
2430 2430 *(uint8_t *)&arqstat->sts_rqpkt_status =
2431 2431 STATUS_GOOD;
2432 2432 con_log(CL_ANN1,
2433 2433 (CE_NOTE, "Copying Sense data %x",
2434 2434 cmd->SMID));
2435 2435
2436 2436 ddi_rep_get8(acc_handle,
2437 2437 (uint8_t *)&(arqstat->sts_sensedata),
2438 2438 cmd->sense1,
2439 2439 sizeof (struct scsi_extended_sense),
2440 2440 DDI_DEV_AUTOINCR);
2441 2441
2442 2442 }
2443 2443 break;
2444 2444 case MFI_STAT_LD_OFFLINE:
2445 2445 dev_err(instance->dip, CE_WARN,
2446 2446 "tbolt_complete_cmd: ld offline "
2447 2447 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2449 2449 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2450 2450
2451 2451 ddi_get16(acc_handle,
2452 2452 &scsi_raid_io->RaidContext.ldTargetId),
2453 2453
2454 2454 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2455 2455
2456 2456 pkt->pkt_reason = CMD_DEV_GONE;
2457 2457 pkt->pkt_statistics = STAT_DISCON;
2458 2458 break;
2459 2459 case MFI_STAT_DEVICE_NOT_FOUND:
2460 2460 con_log(CL_ANN, (CE_CONT,
2461 2461 "tbolt_complete_cmd: device not found error"));
2462 2462 pkt->pkt_reason = CMD_DEV_GONE;
2463 2463 pkt->pkt_statistics = STAT_DISCON;
2464 2464 break;
2465 2465
2466 2466 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 2467 pkt->pkt_state |= STATE_ARQ_DONE;
2468 2468 pkt->pkt_reason = CMD_CMPLT;
2469 2469 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2470 2470
2471 2471 arqstat = (void *)(pkt->pkt_scbp);
2472 2472 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 2473 arqstat->sts_rqpkt_resid = 0;
2474 2474 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 2475 | STATE_GOT_TARGET | STATE_SENT_CMD
2476 2476 | STATE_XFERRED_DATA;
2477 2477 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2478 2478
2479 2479 arqstat->sts_sensedata.es_valid = 1;
2480 2480 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 2481 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2482 2482
2483 2483 /*
2484 2484 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 2485 			 * ASC 0x21, ASCQ 0x00.
2486 2486 */
2487 2487 arqstat->sts_sensedata.es_add_code = 0x21;
2488 2488 arqstat->sts_sensedata.es_qual_code = 0x00;
2489 2489 break;
2490 2490 case MFI_STAT_INVALID_CMD:
2491 2491 case MFI_STAT_INVALID_DCMD:
2492 2492 case MFI_STAT_INVALID_PARAMETER:
2493 2493 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 2494 default:
2495 2495 dev_err(instance->dip, CE_WARN,
2496 2496 "tbolt_complete_cmd: Unknown status!");
2497 2497 pkt->pkt_reason = CMD_TRAN_ERR;
2498 2498
2499 2499 break;
2500 2500 }
2501 2501
2502 2502 atomic_add_16(&instance->fw_outstanding, (-1));
2503 2503
2504 2504 (void) mrsas_common_check(instance, cmd);
2505 2505 if (acmd->cmd_dmahandle) {
2506 2506 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2507 2507 DDI_SUCCESS) {
2508 2508 ddi_fm_service_impact(instance->dip,
2509 2509 DDI_SERVICE_UNAFFECTED);
2510 2510 pkt->pkt_reason = CMD_TRAN_ERR;
2511 2511 pkt->pkt_statistics = 0;
2512 2512 }
2513 2513 }
2514 2514
2515 2515 /* Call the callback routine */
2516 2516 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2517 2517 (*pkt->pkt_comp)(pkt);
2518 2518
2519 2519 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2520 2520
2521 2521 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2522 2522
2523 2523 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2524 2524
2525 2525 return_raid_msg_pkt(instance, cmd);
2526 2526 break;
2527 2527 }
2528 2528 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2529 2529
2530 2530 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2531 2531 cmd->frame->dcmd.mbox.b[1] == 1) {
2532 2532
2533 2533 mutex_enter(&instance->sync_map_mtx);
2534 2534
2535 2535 con_log(CL_ANN, (CE_NOTE,
2536 2536 "LDMAP sync command SMID RECEIVED 0x%X",
2537 2537 cmd->SMID));
2538 2538 if (cmd->frame->hdr.cmd_status != 0) {
2539 2539 dev_err(instance->dip, CE_WARN,
2540 2540 "map sync failed, status = 0x%x.",
2541 2541 cmd->frame->hdr.cmd_status);
2542 2542 } else {
2543 2543 instance->map_id++;
2544 2544 con_log(CL_ANN1, (CE_NOTE,
2545 2545 "map sync received, switched map_id to %"
2546 2546 PRIu64, instance->map_id));
2547 2547 }
2548 2548
2549 2549 if (MR_ValidateMapInfo(
2550 2550 instance->ld_map[instance->map_id & 1],
2551 2551 instance->load_balance_info)) {
2552 2552 instance->fast_path_io = 1;
2553 2553 } else {
2554 2554 instance->fast_path_io = 0;
2555 2555 }
2556 2556
2557 2557 con_log(CL_ANN, (CE_NOTE,
2558 2558 "instance->fast_path_io %d",
2559 2559 instance->fast_path_io));
2560 2560
2561 2561 instance->unroll.syncCmd = 0;
2562 2562
2563 2563 if (instance->map_update_cmd == cmd) {
2564 2564 return_raid_msg_pkt(instance, cmd);
2565 2565 atomic_add_16(&instance->fw_outstanding, (-1));
2566 2566 (void) mrsas_tbolt_sync_map_info(instance);
2567 2567 }
2568 2568
2569 2569 con_log(CL_ANN1, (CE_NOTE,
2570 2570 "LDMAP sync completed, ldcount=%d",
2571 2571 instance->ld_map[instance->map_id & 1]
2572 2572 ->raidMap.ldCount));
2573 2573 mutex_exit(&instance->sync_map_mtx);
2574 2574 break;
2575 2575 }
2576 2576
2577 2577 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2578 2578 con_log(CL_ANN1, (CE_CONT,
2579 2579 "AEN command SMID RECEIVED 0x%X",
2580 2580 cmd->SMID));
2581 2581 if ((instance->aen_cmd == cmd) &&
2582 2582 (instance->aen_cmd->abort_aen)) {
2583 2583 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2584 2584 "aborted_aen returned"));
2585 2585 } else {
2586 2586 atomic_add_16(&instance->fw_outstanding, (-1));
2587 2587 service_mfi_aen(instance, cmd);
2588 2588 }
2589 2589 }
2590 2590
2591 2591 if (cmd->sync_cmd == MRSAS_TRUE) {
2592 2592 con_log(CL_ANN1, (CE_CONT,
2593 2593 "Sync-mode Command Response SMID RECEIVED 0x%X",
2594 2594 cmd->SMID));
2595 2595
2596 2596 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2597 2597 } else {
2598 2598 con_log(CL_ANN, (CE_CONT,
2599 2599 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2600 2600 cmd->SMID));
2601 2601 }
2602 2602 break;
2603 2603 default:
2604 2604 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2605 2605 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2606 2606
2607 2607 		con_log(CL_ANN,
2608 2608 		    (CE_NOTE, "tbolt_complete_cmd: unknown request function"));
2610 2610 break;
2611 2611 }
2612 2612 }
2613 2613
2614 2614 uint_t
2615 2615 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2616 2616 {
2617 2617 uint8_t replyType;
2618 2618 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2619 2619 Mpi2ReplyDescriptorsUnion_t *desc;
2620 2620 uint16_t smid;
2621 2621 union desc_value d_val;
2622 2622 struct mrsas_cmd *cmd;
2623 2623
2624 2624 struct mrsas_header *hdr;
2625 2625 struct scsi_pkt *pkt;
2626 2626
2627 2627 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2628 2628 0, 0, DDI_DMA_SYNC_FORDEV);
2629 2629
2630 2630 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2631 2631 0, 0, DDI_DMA_SYNC_FORCPU);
2632 2632
2633 2633 desc = instance->reply_frame_pool;
2634 2634 desc += instance->reply_read_index;
2635 2635
2636 2636 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2637 2637 replyType = replyDesc->ReplyFlags &
2638 2638 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2639 2639
2640 2640 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2641 2641 return (DDI_INTR_UNCLAIMED);
2642 2642
2643 2643 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2644 2644 != DDI_SUCCESS) {
2645 2645 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2646 2646 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2647 2647 con_log(CL_ANN1,
2648 2648 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2649 2649 "FMA check, returning DDI_INTR_UNCLAIMED"));
2650 2650 return (DDI_INTR_CLAIMED);
2651 2651 }
2652 2652
2653 2653 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2654 2654 (void *)desc, desc->Words));
2655 2655
2656 2656 d_val.word = desc->Words;
2657 2657
2658 2658
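	/*
	 * Walk the reply descriptor ring until an unused (all-ones)
	 * descriptor is reached.  Each consumed descriptor is reset to
	 * all ones, the read index wraps at reply_q_depth, and the final
	 * index is posted back to the FW when the loop ends.
	 */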
2659 2659 /* Read Reply descriptor */
2660 2660 while ((d_val.u1.low != 0xffffffff) &&
2661 2661 (d_val.u1.high != 0xffffffff)) {
2662 2662
2663 2663 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2664 2664 0, 0, DDI_DMA_SYNC_FORCPU);
2665 2665
2666 2666 smid = replyDesc->SMID;
2667 2667
2668 2668 if (!smid || smid > instance->max_fw_cmds + 1) {
2669 2669 con_log(CL_ANN1, (CE_NOTE,
2670 2670 "Reply Desc at Break = %p Words = %" PRIx64,
2671 2671 (void *)desc, desc->Words));
2672 2672 break;
2673 2673 }
2674 2674
2675 2675 cmd = instance->cmd_list[smid - 1];
2676 2676 if (!cmd) {
2677 2677 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2678 2678 			    "outstanding_cmd: invalid command or poll "
2679 2679 			    "command received in completion path"));
2680 2680 } else {
2681 2681 mutex_enter(&instance->cmd_pend_mtx);
2682 2682 if (cmd->sync_cmd == MRSAS_TRUE) {
2683 2683 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2684 2684 if (hdr) {
2685 2685 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2686 2686 "tbolt_process_outstanding_cmd:"
2687 2687 " mlist_del_init(&cmd->list)."));
2688 2688 mlist_del_init(&cmd->list);
2689 2689 }
2690 2690 } else {
2691 2691 pkt = cmd->pkt;
2692 2692 if (pkt) {
2693 2693 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2694 2694 "tbolt_process_outstanding_cmd:"
2695 2695 "mlist_del_init(&cmd->list)."));
2696 2696 mlist_del_init(&cmd->list);
2697 2697 }
2698 2698 }
2699 2699
2700 2700 mutex_exit(&instance->cmd_pend_mtx);
2701 2701
2702 2702 tbolt_complete_cmd(instance, cmd);
2703 2703 }
2704 2704 /* set it back to all 1s. */
2705 2705 desc->Words = -1LL;
2706 2706
2707 2707 instance->reply_read_index++;
2708 2708
2709 2709 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2710 2710 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2711 2711 instance->reply_read_index = 0;
2712 2712 }
2713 2713
2714 2714 /* Get the next reply descriptor */
2715 2715 if (!instance->reply_read_index)
2716 2716 desc = instance->reply_frame_pool;
2717 2717 else
2718 2718 desc++;
2719 2719
2720 2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 2721
2722 2722 d_val.word = desc->Words;
2723 2723
2724 2724 con_log(CL_ANN1, (CE_NOTE,
2725 2725 "Next Reply Desc = %p Words = %" PRIx64,
2726 2726 (void *)desc, desc->Words));
2727 2727
2728 2728 replyType = replyDesc->ReplyFlags &
2729 2729 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2730 2730
2731 2731 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2732 2732 break;
2733 2733
2734 2734 } /* End of while loop. */
2735 2735
2736 2736 /* update replyIndex to FW */
2737 2737 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2738 2738
2739 2739
2740 2740 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2741 2741 0, 0, DDI_DMA_SYNC_FORDEV);
2742 2742
2743 2743 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2744 2744 0, 0, DDI_DMA_SYNC_FORCPU);
2745 2745 return (DDI_INTR_CLAIMED);
2746 2746 }
2747 2747
2748 2748
2749 2749
2750 2750
2751 2751 /*
2752 2752  * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2753 2753  * @instance: Adapter soft state
2754 2754  * @cmd: Command to be completed
2755 2755  *
2756 2756  * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2757 2757  * complete after it issues a command.  This function wakes up that
2758 2758  * waiter by broadcasting on the int_cmd_cv condition variable.
2759 2759 */
2760 2760 void
2761 2761 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2762 2762 struct mrsas_cmd *cmd)
2763 2763 {
2764 2764
2765 2765 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2766 2766 &cmd->frame->io.cmd_status);
2767 2767
2768 2768 cmd->sync_cmd = MRSAS_FALSE;
2769 2769
2770 2770 mutex_enter(&instance->int_cmd_mtx);
2771 2771 if (cmd->cmd_status == ENODATA) {
2772 2772 cmd->cmd_status = 0;
2773 2773 }
2774 2774 cv_broadcast(&instance->int_cmd_cv);
2775 2775 mutex_exit(&instance->int_cmd_mtx);
2776 2776
2777 2777 }
2778 2778
2779 2779 /*
2780 2780  * mrsas_tbolt_get_ld_map_info - Fetch the FW's LD RAID map
2781 2781  * instance: Adapter soft state
2782 2782  *
2783 2783  * Issues an internal command (DCMD) to read the controller's LD RAID
2784 2784  * map. The map is later validated to decide whether fast-path I/O is
2785 2785  * possible.
2786 2786 */
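/*
 * Note: the driver double-buffers the RAID map; ld_map[map_id & 1] selects
 * the active copy, and a successful map sync increments map_id to flip to
 * the other buffer.
 */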
2787 2787 int
2788 2788 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2789 2789 {
2790 2790 int ret = 0;
2791 2791 struct mrsas_cmd *cmd = NULL;
2792 2792 struct mrsas_dcmd_frame *dcmd;
2793 2793 MR_FW_RAID_MAP_ALL *ci;
2794 2794 uint32_t ci_h = 0;
2795 2795 U32 size_map_info;
2796 2796
2797 2797 cmd = get_raid_msg_pkt(instance);
2798 2798
2799 2799 if (cmd == NULL) {
2800 2800 dev_err(instance->dip, CE_WARN,
2801 2801 "Failed to get a cmd from free-pool in get_ld_map_info()");
2802 2802 return (DDI_FAILURE);
2803 2803 }
2804 2804
2805 2805 dcmd = &cmd->frame->dcmd;
2806 2806
2807 2807 size_map_info = sizeof (MR_FW_RAID_MAP) +
2808 2808 (sizeof (MR_LD_SPAN_MAP) *
2809 2809 (MAX_LOGICAL_DRIVES - 1));
2810 2810
2811 2811 con_log(CL_ANN, (CE_NOTE,
2812 2812 "size_map_info : 0x%x", size_map_info));
2813 2813
2814 2814 ci = instance->ld_map[instance->map_id & 1];
2815 2815 ci_h = instance->ld_map_phy[instance->map_id & 1];
2816 2816
2817 2817 if (!ci) {
2818 2818 dev_err(instance->dip, CE_WARN,
2819 2819 "Failed to alloc mem for ld_map_info");
2820 2820 return_raid_msg_pkt(instance, cmd);
2821 2821 return (-1);
2822 2822 }
2823 2823
2824 2824 bzero(ci, sizeof (*ci));
2825 2825 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2826 2826
2827 2827 dcmd->cmd = MFI_CMD_OP_DCMD;
2828 2828 dcmd->cmd_status = 0xFF;
2829 2829 dcmd->sge_count = 1;
2830 2830 dcmd->flags = MFI_FRAME_DIR_READ;
2831 2831 dcmd->timeout = 0;
2832 2832 dcmd->pad_0 = 0;
2833 2833 dcmd->data_xfer_len = size_map_info;
2834 2834 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2835 2835 dcmd->sgl.sge32[0].phys_addr = ci_h;
2836 2836 dcmd->sgl.sge32[0].length = size_map_info;
2837 2837
2838 2838
2839 2839 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2840 2840
2841 2841 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2842 2842 ret = 0;
2843 2843 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2844 2844 } else {
2845 2845 dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2846 2846 ret = -1;
2847 2847 }
2848 2848
2849 2849 return_raid_msg_pkt(instance, cmd);
2850 2850
2851 2851 return (ret);
2852 2852 }
2853 2853
2854 2854 void
2855 2855 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2856 2856 {
2857 2857 uint32_t i;
2858 2858 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2859 2859 union desc_value d_val;
2860 2860
2861 2861 reply_desc = instance->reply_frame_pool;
2862 2862
2863 2863 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2864 2864 d_val.word = reply_desc->Words;
2865 2865 con_log(CL_DLEVEL3, (CE_NOTE,
2866 2866 "i=%d, %x:%x",
2867 2867 i, d_val.u1.high, d_val.u1.low));
2868 2868 }
2869 2869 }
2870 2870
2871 2871 /*
2872 2872  * mrsas_tbolt_prepare_cdb - Build the 32-byte CDB for a fast-path
2873 2873  * command to a DIF-capable device.
2874 2874  * @io_info: MegaRAID IO request packet pointer.
2875 2875  * @ref_tag: Reference tag for RD/WRPROTECT
2877 2877 */
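/*
 * Sketch of the resulting 32-byte CDB (derived from the code below):
 * byte 0 = variable-length CDB opcode, byte 7 = additional CDB length,
 * byte 9 = READ32/WRITE32 service action, byte 10 = protection flags,
 * bytes 12-19 = 64-bit LBA (big-endian), bytes 28-31 = transfer length.
 */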
2878 2878 void
2879 2879 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2880 2880 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2881 2881 U32 ref_tag)
2882 2882 {
2883 2883 uint16_t EEDPFlags;
2884 2884 uint32_t Control;
2885 2885 ddi_acc_handle_t acc_handle =
2886 2886 instance->mpi2_frame_pool_dma_obj.acc_handle;
2887 2887
2888 2888 /* Prepare 32-byte CDB if DIF is supported on this device */
2889 2889 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2890 2890
2891 2891 bzero(cdb, 32);
2892 2892
2893 2893 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2894 2894
2895 2895
2896 2896 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2897 2897
2898 2898 if (io_info->isRead)
2899 2899 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2900 2900 else
2901 2901 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2902 2902
2903 2903 	/* The Linux driver sets this to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2904 2904 cdb[10] = MRSAS_RD_WR_PROTECT;
2905 2905
2906 2906 /* LOGICAL BLOCK ADDRESS */
2907 2907 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2908 2908 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2909 2909 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2910 2910 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2911 2911 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2912 2912 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2913 2913 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2914 2914 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2915 2915
2916 2916 /* Logical block reference tag */
2917 2917 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2918 2918 BE_32(ref_tag));
2919 2919
2920 2920 ddi_put16(acc_handle,
2921 2921 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2922 2922
2923 2923 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2924 2924 ((io_info->numBlocks)*512));
2925 2925 /* Specify 32-byte cdb */
2926 2926 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2927 2927
2928 2928 /* Transfer length */
2929 2929 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2930 2930 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2931 2931 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2932 2932 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2933 2933
2934 2934 /* set SCSI IO EEDPFlags */
2935 2935 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2936 2936 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2937 2937
2938 2938 /* set SCSI IO EEDPFlags bits */
2939 2939 if (io_info->isRead) {
2940 2940 /*
2941 2941 * For READ commands, the EEDPFlags shall be set to specify to
2942 2942 * Increment the Primary Reference Tag, to Check the Reference
2943 2943 * Tag, and to Check and Remove the Protection Information
2944 2944 * fields.
2945 2945 */
2946 2946 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2947 2947 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2948 2948 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2949 2949 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2950 2950 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2951 2951 } else {
2952 2952 /*
2953 2953 * For WRITE commands, the EEDPFlags shall be set to specify to
2954 2954 * Increment the Primary Reference Tag, and to Insert
2955 2955 * Protection Information fields.
2956 2956 */
2957 2957 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2958 2958 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2959 2959 }
2960 2960 Control |= (0x4 << 26);
2961 2961
2962 2962 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2963 2963 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2964 2964 ddi_put32(acc_handle,
2965 2965 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2966 2966 }
2967 2967
2968 2968
2969 2969 /*
2970 2970 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2971 2971 * @cdb: CDB
2972 2972 * @cdb_size: CDB size
2973 2973 * @cdb_len_ptr: cdb length
2974 2974 * @start_blk: Start block of IO
2975 2975 * @num_blocks: Number of blocks
2976 2976 *
2977 2977 * Used to set the PD LBA in CDB for FP IOs
2978 2978 */
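/*
 * Example (from the conversions below): a READ(16) whose start block fits
 * in 32 bits is rewritten as a READ(10) with the LBA in bytes 2-5 and the
 * transfer length in bytes 7-8; conversely, a short CDB addressing a block
 * beyond 32 bits is widened to a 16-byte CDB.
 */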
2979 2979 static void
2980 2980 mrsas_tbolt_set_pd_lba(U8 *cdb, size_t cdb_size, uint8_t *cdb_len_ptr,
2981 2981 U64 start_blk, U32 num_blocks)
2982 2982 {
2983 2983 U8 cdb_len = *cdb_len_ptr;
2984 2984 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2985 2985
2986 2986 	/* Some drives don't support 16/12-byte CDBs; convert to 10-byte. */
2987 2987 if (((cdb_len == 12) || (cdb_len == 16)) &&
2988 2988 (start_blk <= 0xffffffff)) {
2989 2989 if (cdb_len == 16) {
2990 2990 con_log(CL_ANN,
2991 2991 			    (CE_NOTE, "Converting READ/WRITE(16) to READ/WRITE(10)"));
2992 2992 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2993 2993 flagvals = cdb[1];
2994 2994 groupnum = cdb[14];
2995 2995 control = cdb[15];
2996 2996 } else {
2997 2997 con_log(CL_ANN,
2998 2998 			    (CE_NOTE, "Converting READ/WRITE(12) to READ/WRITE(10)"));
2999 2999 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3000 3000 flagvals = cdb[1];
3001 3001 groupnum = cdb[10];
3002 3002 control = cdb[11];
3003 3003 }
3004 3004
3005 3005 bzero(cdb, cdb_size);
3006 3006
3007 3007 cdb[0] = opcode;
3008 3008 cdb[1] = flagvals;
3009 3009 cdb[6] = groupnum;
3010 3010 cdb[9] = control;
3011 3011 /* Set transfer length */
3012 3012 cdb[8] = (U8)(num_blocks & 0xff);
3013 3013 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3014 3014 cdb_len = 10;
3015 3015 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3016 3016 		/* Convert to a 16-byte CDB for large LBAs. */
3017 3017 con_log(CL_ANN,
3018 3018 		    (CE_NOTE, "Converting 6/10/12-byte CDB to 16-byte CDB"));
3019 3019 switch (cdb_len) {
3020 3020 case 6:
3021 3021 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3022 3022 control = cdb[5];
3023 3023 break;
3024 3024 case 10:
3025 3025 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3026 3026 flagvals = cdb[1];
3027 3027 groupnum = cdb[6];
3028 3028 control = cdb[9];
3029 3029 break;
3030 3030 case 12:
3031 3031 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3032 3032 flagvals = cdb[1];
3033 3033 groupnum = cdb[10];
3034 3034 control = cdb[11];
3035 3035 break;
3036 3036 }
3037 3037
3038 3038 bzero(cdb, cdb_size);
3039 3039
3040 3040 cdb[0] = opcode;
3041 3041 cdb[1] = flagvals;
3042 3042 cdb[14] = groupnum;
3043 3043 cdb[15] = control;
3044 3044
3045 3045 /* Transfer length */
3046 3046 cdb[13] = (U8)(num_blocks & 0xff);
3047 3047 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3048 3048 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3049 3049 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3050 3050
3051 3051 /* Specify 16-byte cdb */
3052 3052 cdb_len = 16;
3053 3053 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3054 3054 /* convert to 10 byte CDB */
3055 3055 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3056 3056 control = cdb[5];
3057 3057
3058 3058 bzero(cdb, cdb_size);
3059 3059 cdb[0] = opcode;
3060 3060 cdb[9] = control;
3061 3061
3062 3062 /* Set transfer length */
3063 3063 cdb[8] = (U8)(num_blocks & 0xff);
3064 3064 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3065 3065
3066 3066 /* Specify 10-byte cdb */
3067 3067 cdb_len = 10;
3068 3068 }
3069 3069
3070 3070
3071 3071 	/* Normal case falls through to here; just load the LBA. */
3072 3072 switch (cdb_len) {
3073 3073 case 6:
3074 3074 {
3075 3075 U8 val = cdb[1] & 0xE0;
3076 3076 cdb[3] = (U8)(start_blk & 0xff);
3077 3077 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3078 3078 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3079 3079 break;
3080 3080 }
3081 3081 case 10:
3082 3082 cdb[5] = (U8)(start_blk & 0xff);
3083 3083 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3084 3084 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3085 3085 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3086 3086 break;
3087 3087 case 12:
3088 3088 cdb[5] = (U8)(start_blk & 0xff);
3089 3089 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3090 3090 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3091 3091 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3092 3092 break;
3093 3093
3094 3094 case 16:
3095 3095 cdb[9] = (U8)(start_blk & 0xff);
3096 3096 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3097 3097 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3098 3098 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3099 3099 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3100 3100 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3101 3101 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3102 3102 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3103 3103 break;
3104 3104 }
3105 3105
3106 3106 *cdb_len_ptr = cdb_len;
3107 3107 }
3108 3108
3109 3109
3110 3110 static int
3111 3111 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3112 3112 {
3113 3113 MR_FW_RAID_MAP_ALL *ld_map;
3114 3114
3115 3115 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3116 3116
3117 3117 ld_map = instance->ld_map[instance->map_id & 1];
3118 3118
3119 3119 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3120 3120 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3121 3121
3122 3122 if (MR_ValidateMapInfo(
3123 3123 instance->ld_map[instance->map_id & 1],
3124 3124 instance->load_balance_info)) {
3125 3125 con_log(CL_ANN,
3126 3126 (CE_CONT, "MR_ValidateMapInfo success"));
3127 3127
3128 3128 instance->fast_path_io = 1;
3129 3129 con_log(CL_ANN,
3130 3130 (CE_NOTE, "instance->fast_path_io %d",
3131 3131 instance->fast_path_io));
3132 3132
3133 3133 return (DDI_SUCCESS);
3134 3134 }
3135 3135
3136 3136 }
3137 3137
3138 3138 instance->fast_path_io = 0;
3139 3139 dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3140 3140 con_log(CL_ANN, (CE_NOTE,
3141 3141 "instance->fast_path_io %d", instance->fast_path_io));
3142 3142
3143 3143 return (DDI_FAILURE);
3144 3144 }
3145 3145
3146 3146 /*
3147 3147  * Marks the HBA as bad. This is called either when an IO packet
3148 3148  * times out even after 3 FW resets, or when the FW is found to be
3149 3149  * faulted even after 3 consecutive resets.
3150 3150 */
3151 3151
3152 3152 void
3153 3153 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3154 3154 {
3155 3155 dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3156 3156
3157 3157 if (instance->deadadapter == 1)
3158 3158 return;
3159 3159
3160 3160 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3161 3161 "Writing to doorbell with MFI_STOP_ADP "));
3162 3162 mutex_enter(&instance->ocr_flags_mtx);
3163 3163 instance->deadadapter = 1;
3164 3164 mutex_exit(&instance->ocr_flags_mtx);
3165 3165 instance->func_ptr->disable_intr(instance);
3166 3166 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3167 3167 /* Flush */
3168 3168 (void) RD_RESERVED0_REGISTER(instance);
3169 3169
3170 3170 (void) mrsas_print_pending_cmds(instance);
3171 3171 (void) mrsas_complete_pending_cmds(instance);
3172 3172 }
3173 3173
3174 3174 void
3175 3175 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3176 3176 {
3177 3177 int i;
3178 3178 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3179 3179 instance->reply_read_index = 0;
3180 3180
3181 3181 /* initializing reply address to 0xFFFFFFFF */
3182 3182 reply_desc = instance->reply_frame_pool;
3183 3183
3184 3184 for (i = 0; i < instance->reply_q_depth; i++) {
3185 3185 reply_desc->Words = (uint64_t)~0;
3186 3186 reply_desc++;
3187 3187 }
3188 3188 }
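
mrsas_reset_reply_desc() stamps every descriptor with all ones, which the reply path treats as "slot unused". A self-contained sketch of how a consumer can rely on that sentinel (names hypothetical; this is not the driver's actual completion routine):

    #include <stdint.h>
    #include <stddef.h>

    #define	UNUSED_DESC	((uint64_t)~0)	/* matches the ~0 stamp above */

    /* Hypothetical, trimmed descriptor: only the 64-bit overlay matters. */
    typedef struct {
    	uint64_t words;
    } reply_desc_t;

    /*
     * Walk the reply queue from *read_index, stopping at the first slot
     * the firmware has not overwritten; re-arm each consumed slot with
     * the all-ones marker. Returns the number of completions seen.
     */
    static size_t
    drain_replies(reply_desc_t *q, size_t depth, size_t *read_index)
    {
    	size_t seen = 0;

    	while (q[*read_index].words != UNUSED_DESC) {
    		q[*read_index].words = UNUSED_DESC;
    		*read_index = (*read_index + 1) % depth;
    		seen++;
    	}
    	return (seen);
    }

    int
    main(void)
    {
    	reply_desc_t q[4] = {
    		{ 0x1 }, { 0x2 }, { UNUSED_DESC }, { UNUSED_DESC }
    	};
    	size_t ri = 0;

    	return (drain_replies(q, 4, &ri) == 2 ? 0 : 1);
    }
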
3189 3189
3190 3190 int
3191 3191 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3192 3192 {
3193 3193 uint32_t status = 0x00;
3194 3194 uint32_t retry = 0;
3195 3195 uint32_t cur_abs_reg_val;
3196 3196 uint32_t fw_state;
3197 3197 uint32_t abs_state;
3198 3198 uint32_t i;
3199 3199
3200 3200 if (instance->deadadapter == 1) {
3201 3201 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3202 3202 "no more resets as HBA has been marked dead");
3203 3203 return (DDI_FAILURE);
3204 3204 }
3205 3205
3206 3206 mutex_enter(&instance->ocr_flags_mtx);
3207 3207 instance->adapterresetinprogress = 1;
3208 3208 mutex_exit(&instance->ocr_flags_mtx);
3209 3209
3210 3210 instance->func_ptr->disable_intr(instance);
3211 3211
3212 3212	/* Delay to let in-flight ioctl and I/O commands complete. */
3213 3213 for (i = 0; i < 3000; i++)
3214 3214 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3215 3215
3216 3216 instance->reply_read_index = 0;
3217 3217
3218 3218 retry_reset:
3219 3219 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));
3220 3220
3221 3221 /* Flush */
3222 3222 WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
3223 3223 /* Write magic number */
3224 3224 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3225 3225 WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
3226 3226 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3227 3227 WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
3228 3228 WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
3229 3229 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3230 3230
3231 3231 con_log(CL_ANN1, (CE_NOTE,
3232 3232 "mrsas_tbolt_reset_ppc: magic number written "
3233 3233 "to write sequence register"));
3234 3234
3235 3235 /* Wait for the diag write enable (DRWE) bit to be set */
3236 3236 retry = 0;
3237 3237 status = RD_TBOLT_HOST_DIAG(instance);
3238 3238 while (!(status & DIAG_WRITE_ENABLE)) {
3239 3239 delay(100 * drv_usectohz(MILLISEC));
3240 3240 status = RD_TBOLT_HOST_DIAG(instance);
3241 3241 if (retry++ >= 100) {
3242 3242 dev_err(instance->dip, CE_WARN,
3243 3243 "%s(): timeout waiting for DRWE.", __func__);
3244 3244 return (DDI_FAILURE);
3245 3245 }
3246 3246 }
3247 3247
3248 3248 /* Send reset command */
3249 3249 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3250 3250 delay(100 * drv_usectohz(MILLISEC));
3251 3251
3252 3252 /* Wait for reset bit to clear */
3253 3253 retry = 0;
3254 3254 status = RD_TBOLT_HOST_DIAG(instance);
3255 3255 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3256 3256 delay(100 * drv_usectohz(MILLISEC));
3257 3257 status = RD_TBOLT_HOST_DIAG(instance);
3258 3258 if (retry++ == 100) {
3259 3259			/* Don't call kill adapter here; the RESET */
3260 3260			/* ADAPTER bit is cleared by the firmware. */
3261 3261 /* mrsas_tbolt_kill_adapter(instance); */
3262 3262 dev_err(instance->dip, CE_WARN,
3263 3263 "%s(): RESET FAILED; return failure!!!", __func__);
3264 3264 return (DDI_FAILURE);
3265 3265 }
3266 3266 }
3267 3267
3268 3268 con_log(CL_ANN,
3269 3269 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3270 3270
3271 3271 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3272 3272 retry = 0;
3273 3273 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3274 3274 delay(100 * drv_usectohz(MILLISEC));
3275 3275 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 3276 }
3277 3277 if (abs_state <= MFI_STATE_FW_INIT) {
3278 3278 dev_err(instance->dip, CE_WARN,
3279 3279		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
3280 3280 "state = 0x%x, RETRY RESET.", abs_state);
3281 3281 goto retry_reset;
3282 3282 }
3283 3283
3284 3284	/* Mark HBA as bad if the FW is faulted after 3 consecutive resets */
3285 3285 if (mfi_state_transition_to_ready(instance) ||
3286 3286 mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
3287 3287 cur_abs_reg_val =
3288 3288 instance->func_ptr->read_fw_status_reg(instance);
3289 3289 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3290 3290
3291 3291 con_log(CL_ANN1, (CE_NOTE,
3292 3292	    "mrsas_tbolt_reset_ppc: before fault injection: FW is not ready, "
3293 3293 "FW state = 0x%x", fw_state));
3294 3294 if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
3295 3295 fw_state = MFI_STATE_FAULT;
3296 3296
3297 3297 con_log(CL_ANN,
3298 3298	    (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
3299 3299 "FW state = 0x%x", fw_state));
3300 3300
3301 3301 if (fw_state == MFI_STATE_FAULT) {
3302 3302 /* increment the count */
3303 3303 instance->fw_fault_count_after_ocr++;
3304 3304 if (instance->fw_fault_count_after_ocr
3305 3305 < MAX_FW_RESET_COUNT) {
3306 3306 dev_err(instance->dip, CE_WARN,
3307 3307 "mrsas_tbolt_reset_ppc: "
3308 3308 "FW is in fault after OCR count %d "
3309 3309 "Retry Reset",
3310 3310 instance->fw_fault_count_after_ocr);
3311 3311 goto retry_reset;
3312 3312
3313 3313 } else {
3314 3314				dev_err(instance->dip, CE_WARN, "%s: "
3315 3315				    "Max Reset Count exceeded >%d; "
3316 3316				    "Mark HBA as bad, KILL adapter",
3317 3317 __func__, MAX_FW_RESET_COUNT);
3318 3318
3319 3319 mrsas_tbolt_kill_adapter(instance);
3320 3320 return (DDI_FAILURE);
3321 3321 }
3322 3322 }
3323 3323 }
3324 3324
3325 3325 /* reset the counter as FW is up after OCR */
3326 3326 instance->fw_fault_count_after_ocr = 0;
3327 3327
3328 3328 mrsas_reset_reply_desc(instance);
3329 3329
3330 3330 abs_state = mrsas_issue_init_mpi2(instance);
3331 3331 if (abs_state == (uint32_t)DDI_FAILURE) {
3332 3332 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3333 3333 "INIT failed Retrying Reset");
3334 3334 goto retry_reset;
3335 3335 }
3336 3336
3337 3337 (void) mrsas_print_pending_cmds(instance);
3338 3338
3339 3339 instance->func_ptr->enable_intr(instance);
3340 3340 instance->fw_outstanding = 0;
3341 3341
3342 3342 (void) mrsas_issue_pending_cmds(instance);
3343 3343
3344 3344 instance->aen_cmd->retry_count_for_ocr = 0;
3345 3345 instance->aen_cmd->drv_pkt_time = 0;
3346 3346
3347 3347 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3348 3348
3349 3349 mutex_enter(&instance->ocr_flags_mtx);
3350 3350 instance->adapterresetinprogress = 0;
3351 3351 mutex_exit(&instance->ocr_flags_mtx);
3352 3352
3353 3353 dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successfully");
3354 3354
3355 3355 return (DDI_SUCCESS);
3356 3356 }
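
mrsas_tbolt_reset_ppc() uses the same bounded-poll idiom three times: wait for DRWE, wait for the reset bit to clear, then wait for the FW state. A generic user-space sketch of that pattern (poll_until and its callback are hypothetical; the loops above use a 100ms interval and a budget of 100 tries, roughly a 10 second timeout):

    #include <stdbool.h>
    #include <unistd.h>

    /*
     * Illustrative only: poll cond(arg) every interval_us microseconds,
     * giving up after max_tries attempts.
     */
    static bool
    poll_until(bool (*cond)(void *), void *arg, unsigned interval_us,
        unsigned max_tries)
    {
    	unsigned i;

    	for (i = 0; i < max_tries; i++) {
    		if (cond(arg))
    			return (true);
    		(void) usleep(interval_us);
    	}
    	return (false);
    }

    static bool
    ready(void *arg)
    {
    	return (--*(int *)arg == 0);	/* succeeds on the third call */
    }

    int
    main(void)
    {
    	int countdown = 3;

    	/* A real caller's predicate would read a status register. */
    	return (poll_until(ready, &countdown, 1000, 100) ? 0 : 1);
    }
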
3357 3357
3358 3358 /*
3359 3359	 * mrsas_tbolt_sync_map_info - Keep the driver's LD map in sync with FW
3360 3360	 * @instance: Adapter soft state
3361 3361	 *
3362 3362	 * Issues an internal, pended DCMD that hands the FW the current LD
3363 3363	 * target IDs and sequence numbers; the FW completes it when the RAID
3364 3364	 * map changes, signalling the driver to fetch the updated map.
3365 3365 */
3366 3366
3367 3367 static int
3368 3368 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3369 3369 {
3370 3370 int ret = 0, i;
3371 3371 struct mrsas_cmd *cmd = NULL;
3372 3372 struct mrsas_dcmd_frame *dcmd;
3373 3373 uint32_t size_sync_info, num_lds;
3374 3374 LD_TARGET_SYNC *ci = NULL;
3375 3375 MR_FW_RAID_MAP_ALL *map;
3376 3376 MR_LD_RAID *raid;
3377 3377 LD_TARGET_SYNC *ld_sync;
3378 3378 uint32_t ci_h = 0;
3379 3379 uint32_t size_map_info;
3380 3380
3381 3381 cmd = get_raid_msg_pkt(instance);
3382 3382
3383 3383 if (cmd == NULL) {
3384 3384 dev_err(instance->dip, CE_WARN,
3385 3385 "Failed to get a cmd from free-pool in "
3386 3386 "mrsas_tbolt_sync_map_info().");
3387 3387 return (DDI_FAILURE);
3388 3388 }
3389 3389
3390 3390 /* Clear the frame buffer and assign back the context id */
3391 3391 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3392 3392 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3393 3393 cmd->index);
3394 3394 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3395 3395
3396 3396
3397 3397 map = instance->ld_map[instance->map_id & 1];
3398 3398
3399 3399 num_lds = map->raidMap.ldCount;
3400 3400
3401 3401 dcmd = &cmd->frame->dcmd;
3402 3402
3403 3403 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3404 3404
3405 3405	con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
3406 3406 size_sync_info, num_lds));
3407 3407
3408 3408 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3409 3409
3410 3410 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3411 3411 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3412 3412
3413 3413 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3414 3414
3415 3415 ld_sync = (LD_TARGET_SYNC *)ci;
3416 3416
3417 3417 for (i = 0; i < num_lds; i++, ld_sync++) {
3418 3418 raid = MR_LdRaidGet(i, map);
3419 3419
3420 3420 con_log(CL_ANN1,
3421 3421 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3422 3422 i, raid->seqNum, raid->flags.ldSyncRequired));
3423 3423
3424 3424 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3425 3425
3426 3426 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3427 3427 i, ld_sync->ldTargetId));
3428 3428
3429 3429 ld_sync->seqNum = raid->seqNum;
3430 3430 }
3431 3431
3432 3432
3433 3433 size_map_info = sizeof (MR_FW_RAID_MAP) +
3434 3434 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3435 3435
3436 3436 dcmd->cmd = MFI_CMD_OP_DCMD;
3437 3437 dcmd->cmd_status = 0xFF;
3438 3438 dcmd->sge_count = 1;
3439 3439 dcmd->flags = MFI_FRAME_DIR_WRITE;
3440 3440 dcmd->timeout = 0;
3441 3441 dcmd->pad_0 = 0;
3442 3442 dcmd->data_xfer_len = size_map_info;
3443 3443 ASSERT(num_lds <= 255);
3444 3444 dcmd->mbox.b[0] = (U8)num_lds;
3445 3445 dcmd->mbox.b[1] = 1; /* Pend */
3446 3446 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3447 3447 dcmd->sgl.sge32[0].phys_addr = ci_h;
3448 3448 dcmd->sgl.sge32[0].length = size_map_info;
3449 3449
3450 3450
3451 3451 instance->map_update_cmd = cmd;
3452 3452 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3453 3453
3454 3454 instance->func_ptr->issue_cmd(cmd, instance);
3455 3455
3456 3456 instance->unroll.syncCmd = 1;
3457 3457 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3458 3458
3459 3459 return (ret);
3460 3460 }
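
The sync path above relies on the driver's two-slot RAID map buffering: map_id & 1 selects the active copy, while (map_id - 1) & 1 picks the other slot, which is lent to the FW for the sync payload. A standalone sketch of that index arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint32_t map_id;

    	/*
    	 * With two buffers the active slot alternates on every map
    	 * update; the "other" slot is always the complement. For
    	 * unsigned values, (map_id - 1) & 1 equals (map_id + 1) & 1,
    	 * so both expressions pick the spare slot.
    	 */
    	for (map_id = 0; map_id < 8; map_id++) {
    		uint32_t active = map_id & 1;
    		uint32_t spare = (map_id - 1) & 1;

    		assert(active != spare);
    		assert(spare == ((map_id + 1) & 1));
    	}
    	return (0);
    }
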
3461 3461
3462 3462 /*
3463 3463 * abort_syncmap_cmd
3464 3464 */
3465 3465 int
3466 3466 abort_syncmap_cmd(struct mrsas_instance *instance,
3467 3467 struct mrsas_cmd *cmd_to_abort)
3468 3468 {
3469 3469 int ret = 0;
3470 3470
3471 3471 struct mrsas_cmd *cmd;
3472 3472 struct mrsas_abort_frame *abort_fr;
3473 3473
3474 3474 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3475 3475
3476 3476 cmd = get_raid_msg_mfi_pkt(instance);
3477 3477
3478 3478 if (!cmd) {
3479 3479 dev_err(instance->dip, CE_WARN,
3480 3480		    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3481 3481 return (DDI_FAILURE);
3482 3482 }
3483 3483 /* Clear the frame buffer and assign back the context id */
3484 3484 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3485 3485 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3486 3486 cmd->index);
3487 3487
3488 3488 abort_fr = &cmd->frame->abort;
3489 3489
3490 3490 /* prepare and issue the abort frame */
3491 3491 ddi_put8(cmd->frame_dma_obj.acc_handle,
3492 3492 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3493 3493 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3494 3494 MFI_CMD_STATUS_SYNC_MODE);
3495 3495 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3496 3496 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3497 3497 cmd_to_abort->index);
3498 3498 ddi_put32(cmd->frame_dma_obj.acc_handle,
3499 3499 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3500 3500 ddi_put32(cmd->frame_dma_obj.acc_handle,
3501 3501 &abort_fr->abort_mfi_phys_addr_hi, 0);
3502 3502
3503 3503 cmd->frame_count = 1;
3504 3504
3505 3505 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3506 3506
3507 3507 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3508 3508 con_log(CL_ANN1, (CE_WARN,
3509 3509 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3510 3510 ret = -1;
3511 3511 } else {
3512 3512 ret = 0;
3513 3513 }
3514 3514
3515 3515 return_raid_msg_mfi_pkt(instance, cmd);
3516 3516
3517 3517 atomic_add_16(&instance->fw_outstanding, (-1));
3518 3518
3519 3519 return (ret);
3520 3520 }
3521 3521
3522 3522 /*
3523 3523 * Even though these functions were originally intended for 2208 only, it
3524 3524 * turns out they're useful for "Skinny" support as well. In a perfect world,
3525 3525 * these two functions would be either in mr_sas.c, or in their own new source
3526 3526 * file. Since this driver needs some cleanup anyway, keep this portion in
3527 3527 * mind as well.
3528 3528 */
3529 3529
3530 3530 int
3531 3531 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3532 3532 uint8_t lun, dev_info_t **ldip)
3533 3533 {
3534 3534 struct scsi_device *sd;
3535 3535 dev_info_t *child;
3536 3536 int rval, dtype;
3537 3537 struct mrsas_tbolt_pd_info *pds = NULL;
3538 3538
3539 3539 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3540 3540 tgt, lun));
3541 3541
3542 3542 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3543 3543 if (ldip) {
3544 3544 *ldip = child;
3545 3545 }
3546 3546 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3547 3547 rval = mrsas_service_evt(instance, tgt, 1,
3548 3548 MRSAS_EVT_UNCONFIG_TGT, NULL);
3549 3549 con_log(CL_ANN1, (CE_WARN,
3550 3550 "mr_sas:DELETING STALE ENTRY rval = %d "
3551 3551 "tgt id = %d", rval, tgt));
3552 3552 return (NDI_FAILURE);
3553 3553 }
3554 3554 return (NDI_SUCCESS);
3555 3555 }
3556 3556
3557 3557 pds = (struct mrsas_tbolt_pd_info *)
3558 3558 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3559 3559 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3560 3560 dtype = pds->scsiDevType;
3561 3561
3562 3562 /* Check for Disk */
3563 3563	if (dtype == DTYPE_DIRECT) {
3564 3564		/* Only expose PDs that the FW reports in SYSTEM (JBOD) state. */
3565 3565		if (LE_16(pds->fwState) != PD_SYSTEM) {
3566 3566 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3567 3567 return (NDI_FAILURE);
3568 3568 }
3569 3569 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3570 3570 sd->sd_address.a_hba_tran = instance->tran;
3571 3571 sd->sd_address.a_target = (uint16_t)tgt;
3572 3572 sd->sd_address.a_lun = (uint8_t)lun;
3573 3573
3574 3574 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3575 3575 rval = mrsas_config_scsi_device(instance, sd, ldip);
3576 3576 dev_err(instance->dip, CE_CONT,
3577 3577 "?Phys. device found: tgt %d dtype %d: %s\n",
3578 3578 tgt, dtype, sd->sd_inq->inq_vid);
3579 3579 } else {
3580 3580 rval = NDI_FAILURE;
3581 3581 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3582 3582 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3583 3583 tgt, dtype, sd->sd_inq->inq_vid));
3584 3584 }
3585 3585
3586 3586		/* scsi_unprobe() is currently a no-op; free the buffer manually. */
3587 3587 if (sd->sd_inq) {
3588 3588 kmem_free(sd->sd_inq, SUN_INQSIZE);
3589 3589 sd->sd_inq = (struct scsi_inquiry *)NULL;
3590 3590 }
3591 3591 kmem_free(sd, sizeof (struct scsi_device));
3592 3592 } else {
3593 3593 con_log(CL_ANN1, (CE_NOTE,
3594 3594 "?Device not supported: tgt %d lun %d dtype %d",
3595 3595 tgt, lun, dtype));
3596 3596 rval = NDI_FAILURE;
3597 3597 }
3598 3598
3599 3599 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3600 3600	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3601 3601 rval));
3602 3602 return (rval);
3603 3603 }
3604 3604
3605 3605 static void
3606 3606 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3607 3607 struct mrsas_tbolt_pd_info *pds, int tgt)
3608 3608 {
3609 3609 struct mrsas_cmd *cmd;
3610 3610 struct mrsas_dcmd_frame *dcmd;
3611 3611 dma_obj_t dcmd_dma_obj;
3612 3612
3613 3613 ASSERT(instance->tbolt || instance->skinny);
3614 3614
3615 3615 if (instance->tbolt)
3616 3616 cmd = get_raid_msg_pkt(instance);
3617 3617 else
3618 3618 cmd = mrsas_get_mfi_pkt(instance);
3619 3619
3620 3620 if (!cmd) {
3621 3621 con_log(CL_ANN1,
3622 3622 (CE_WARN, "Failed to get a cmd for get pd info"));
3623 3623 return;
3624 3624 }
3625 3625
3626 3626 /* Clear the frame buffer and assign back the context id */
3627 3627 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3628 3628 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3629 3629 cmd->index);
3630 3630
3631 3631
3632 3632 dcmd = &cmd->frame->dcmd;
3633 3633 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3634 3634 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3635 3635 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3636 3636 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3637 3637 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3638 3638 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3639 3639
3640 3640 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3641 3641 DDI_STRUCTURE_LE_ACC);
3642 3642 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3643 3643 bzero(dcmd->mbox.b, 12);
3644 3644 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3645 3645 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3646 3646 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3647 3647 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3648 3648 MFI_FRAME_DIR_READ);
3649 3649 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3650 3650 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3651 3651 sizeof (struct mrsas_tbolt_pd_info));
3652 3652 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3653 3653 MR_DCMD_PD_GET_INFO);
3654 3654 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3655 3655 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3656 3656 sizeof (struct mrsas_tbolt_pd_info));
3657 3657 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3658 3658 dcmd_dma_obj.dma_cookie[0].dmac_address);
3659 3659
3660 3660 cmd->sync_cmd = MRSAS_TRUE;
3661 3661 cmd->frame_count = 1;
3662 3662
3663 3663 if (instance->tbolt)
3664 3664 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3665 3665
3666 3666 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3667 3667
3668 3668 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3669 3669 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3670 3670 DDI_DEV_AUTOINCR);
3671 3671 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3672 3672
3673 3673 if (instance->tbolt)
3674 3674 return_raid_msg_pkt(instance, cmd);
3675 3675 else
3676 3676 mrsas_return_mfi_pkt(instance, cmd);
3677 3677 }
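
mrsas_tbolt_get_pd_info() follows the driver's usual DCMD recipe: zero the frame, set opcode, direction and transfer length, attach a single SGE pointing at a DMA buffer, issue synchronously, then copy the result out. A host-side mock of that frame setup (the struct, macros, and numeric values are placeholders, not the real mrsas_dcmd_frame or MR_DCMD_PD_GET_INFO):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical, trimmed-down DCMD frame for illustration only. */
    struct mock_dcmd {
    	uint8_t		cmd;		/* command class */
    	uint8_t		cmd_status;	/* 0xFF = not yet completed */
    	uint8_t		sge_count;
    	uint16_t	flags;		/* data direction */
    	uint32_t	opcode;
    	uint32_t	data_xfer_len;
    	uint32_t	sge_phys;	/* single 32-bit SGE address */
    	uint32_t	sge_len;
    };

    #define	MOCK_CMD_OP_DCMD	0x05		/* placeholder value */
    #define	MOCK_DIR_READ		0x0010		/* placeholder value */
    #define	MOCK_PD_GET_INFO	0x02020000	/* placeholder value */

    static void
    build_pd_info_dcmd(struct mock_dcmd *d, uint32_t buf_pa, uint32_t buf_len)
    {
    	memset(d, 0, sizeof (*d));
    	d->cmd = MOCK_CMD_OP_DCMD;
    	d->cmd_status = 0xFF;		/* FW overwrites on completion */
    	d->sge_count = 1;
    	d->flags = MOCK_DIR_READ;	/* device-to-host transfer */
    	d->opcode = MOCK_PD_GET_INFO;
    	d->data_xfer_len = buf_len;	/* must match the SGE length */
    	d->sge_phys = buf_pa;
    	d->sge_len = buf_len;
    }

    int
    main(void)
    {
    	struct mock_dcmd d;

    	build_pd_info_dcmd(&d, 0x10000000, 512);
    	printf("opcode=0x%x len=%u sges=%u\n", d.opcode,
    	    d.data_xfer_len, d.sge_count);
    	return (0);
    }
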
↓ open down ↓ |
2556 lines elided |
↑ open up ↑ |