1 /*
2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 * i.e. Thunderbolt and Invader
4 *
5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 * All rights reserved.
8 *
9 * Version:
10 * Author:
11 * Swaminathan K S
12 * Arun Chandrashekhar
13 * Manju R
14 * Rasheed
15 * Shakeel Bukhari
16 */
17
18 /*
19 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
20 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
21 */
22
23
24 #include <sys/types.h>
25 #include <sys/file.h>
26 #include <sys/atomic.h>
27 #include <sys/scsi/scsi.h>
28 #include <sys/byteorder.h>
29 #include "ld_pd_map.h"
30 #include "mr_sas.h"
31 #include "fusion.h"
32
33 /*
34 * FMA header files
35 */
36 #include <sys/ddifm.h>
37 #include <sys/fm/protocol.h>
38 #include <sys/fm/util.h>
39 #include <sys/fm/io/ddi.h>
40
41
42 /* Pre-TB command size and TB command size. */
43 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
44 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
45 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
46 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
47 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
48 extern ddi_dma_attr_t mrsas_generic_dma_attr;
49 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
50 extern struct ddi_device_acc_attr endian_attr;
51 extern int debug_level_g;
52 extern unsigned int enable_fp;
53 volatile int dump_io_wait_time = 90;
54 extern volatile int debug_timeout_g;
55 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
56 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
57 extern void push_pending_mfi_pkt(struct mrsas_instance *,
58 struct mrsas_cmd *);
59 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
60 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
61
62 /* Local static prototypes. */
63 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
64 struct scsi_address *, struct scsi_pkt *, uchar_t *);
65 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
66 U64 start_blk, U32 num_blocks);
67 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
68 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
69 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
70 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
71 #ifdef PDSUPPORT
72 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
73 struct mrsas_tbolt_pd_info *, int);
74 #endif /* PDSUPPORT */
75
76 static int debug_tbolt_fw_faults_after_ocr_g = 0;
77
78 /*
79 * destroy_mfi_mpi_frame_pool
80 */
81 void
82 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
83 {
84 int i;
85
86 struct mrsas_cmd *cmd;
87
88 /* return all mfi frames to pool */
89 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
90 cmd = instance->cmd_list[i];
91 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
92 (void) mrsas_free_dma_obj(instance,
93 cmd->frame_dma_obj);
94 }
95 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
96 }
97 }
98
99 /*
100 * destroy_mpi2_frame_pool
101 */
102 void
103 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
104 {
105
106 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
107 (void) mrsas_free_dma_obj(instance,
108 instance->mpi2_frame_pool_dma_obj);
109 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
110 }
111 }
112
113
114 /*
115 * mrsas_tbolt_free_additional_dma_buffer
116 */
117 void
118 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
119 {
120 int i;
121
122 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
123 (void) mrsas_free_dma_obj(instance,
124 instance->mfi_internal_dma_obj);
125 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
126 }
127 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
128 (void) mrsas_free_dma_obj(instance,
129 instance->mfi_evt_detail_obj);
130 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
131 }
132
133 for (i = 0; i < 2; i++) {
134 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
135 (void) mrsas_free_dma_obj(instance,
136 instance->ld_map_obj[i]);
137 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
138 }
139 }
140 }
141
142
143 /*
144 * free_req_desc_pool
145 */
146 void
147 free_req_rep_desc_pool(struct mrsas_instance *instance)
148 {
149 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
150 (void) mrsas_free_dma_obj(instance,
151 instance->request_desc_dma_obj);
152 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
153 }
154
155 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
156 (void) mrsas_free_dma_obj(instance,
157 instance->reply_desc_dma_obj);
158 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
159 }
160
161
162 }
163
164
165 /*
166 * ThunderBolt(TB) Request Message Frame Pool
167 */
168 int
169 create_mpi2_frame_pool(struct mrsas_instance *instance)
170 {
171 int i = 0;
172 uint16_t max_cmd;
173 uint32_t sgl_sz;
174 uint32_t raid_msg_size;
175 uint32_t total_size;
176 uint32_t offset;
177 uint32_t io_req_base_phys;
178 uint8_t *io_req_base;
179 struct mrsas_cmd *cmd;
180
181 max_cmd = instance->max_fw_cmds;
182
183 sgl_sz = 1024;
184 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
185
186 /* Allocating additional 256 bytes to accomodate SMID 0. */
187 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
188 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
189
190 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
191 "max_cmd %x", max_cmd));
192
193 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
194 "request message frame pool size %x", total_size));
195
196 /*
197 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
198 * and then split the memory to 1024 commands. Each command should be
199 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
200 * within it. Further refer the "alloc_req_rep_desc" function where
201 * we allocate request/reply descriptors queues for a clue.
202 */
203
204 instance->mpi2_frame_pool_dma_obj.size = total_size;
205 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
206 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
207 0xFFFFFFFFU;
208 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
209 0xFFFFFFFFU;
210 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
211 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
212
213 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
214 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
215 cmn_err(CE_WARN,
216 "mr_sas: could not alloc mpi2 frame pool");
217 return (DDI_FAILURE);
218 }
219
220 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
221 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
222
223 instance->io_request_frames =
224 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
225 instance->io_request_frames_phy =
226 (uint32_t)
227 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
228
229 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
230 (void *)instance->io_request_frames));
231
232 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
233 instance->io_request_frames_phy));
234
235 io_req_base = (uint8_t *)instance->io_request_frames +
236 MRSAS_THUNDERBOLT_MSG_SIZE;
237 io_req_base_phys = instance->io_request_frames_phy +
238 MRSAS_THUNDERBOLT_MSG_SIZE;
239
240 con_log(CL_DLEVEL3, (CE_NOTE,
241 "io req_base_phys 0x%x", io_req_base_phys));
242
243 for (i = 0; i < max_cmd; i++) {
244 cmd = instance->cmd_list[i];
245
246 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
247
248 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
249 ((uint8_t *)io_req_base + offset);
250 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
251
252 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
253 (max_cmd * raid_msg_size) + i * sgl_sz);
254
255 cmd->sgl_phys_addr = (io_req_base_phys +
256 (max_cmd * raid_msg_size) + i * sgl_sz);
257
258 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
259 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
260 (i * SENSE_LENGTH));
261
262 cmd->sense_phys_addr1 = (io_req_base_phys +
263 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
264 (i * SENSE_LENGTH));
265
266
267 cmd->SMID = i + 1;
268
269 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
270 cmd->index, (void *)cmd->scsi_io_request));
271
272 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
273 cmd->index, cmd->scsi_io_request_phys_addr));
274
275 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
276 cmd->index, (void *)cmd->sense1));
277
278 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
279 cmd->index, cmd->sense_phys_addr1));
280
281 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
282 cmd->index, (void *)cmd->sgl));
283
284 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
285 cmd->index, cmd->sgl_phys_addr));
286 }
287
288 return (DDI_SUCCESS);
289
290 }
291
292
293 /*
294 * alloc_additional_dma_buffer for AEN
295 */
296 int
297 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
298 {
299 uint32_t internal_buf_size = PAGESIZE*2;
300 int i;
301
302 /* Initialize buffer status as free */
303 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
304 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
305 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
306 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
307
308
309 instance->mfi_internal_dma_obj.size = internal_buf_size;
310 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
312 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
313 0xFFFFFFFFU;
314 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
315
316 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
317 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
318 cmn_err(CE_WARN,
319 "mr_sas: could not alloc reply queue");
320 return (DDI_FAILURE);
321 }
322
323 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
324
325 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
326 instance->internal_buf =
327 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
328 instance->internal_buf_dmac_add =
329 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
330 instance->internal_buf_size = internal_buf_size;
331
332 /* allocate evt_detail */
333 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
334 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
338 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
339
340 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
341 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
342 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
343 "could not allocate data transfer buffer.");
344 goto fail_tbolt_additional_buff;
345 }
346
347 bzero(instance->mfi_evt_detail_obj.buffer,
348 sizeof (struct mrsas_evt_detail));
349
350 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
351
352 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
353 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
354
355 for (i = 0; i < 2; i++) {
356 /* allocate the data transfer buffer */
357 instance->ld_map_obj[i].size = instance->size_map_info;
358 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
359 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
360 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
361 0xFFFFFFFFU;
362 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
363 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
364
365 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
366 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
367 cmn_err(CE_WARN,
368 "could not allocate data transfer buffer.");
369 goto fail_tbolt_additional_buff;
370 }
371
372 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
373
374 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
375
376 instance->ld_map[i] =
377 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
378 instance->ld_map_phy[i] = (uint32_t)instance->
379 ld_map_obj[i].dma_cookie[0].dmac_address;
380
381 con_log(CL_DLEVEL3, (CE_NOTE,
382 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
383
384 con_log(CL_DLEVEL3, (CE_NOTE,
385 "size_map_info 0x%x", instance->size_map_info));
386 }
387
388 return (DDI_SUCCESS);
389
390 fail_tbolt_additional_buff:
391 mrsas_tbolt_free_additional_dma_buffer(instance);
392
393 return (DDI_FAILURE);
394 }
395
396 MRSAS_REQUEST_DESCRIPTOR_UNION *
397 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
398 {
399 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
400
401 if (index > instance->max_fw_cmds) {
402 con_log(CL_ANN1, (CE_NOTE,
403 "Invalid SMID 0x%x request for descriptor", index));
404 con_log(CL_ANN1, (CE_NOTE,
405 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
406 return (NULL);
407 }
408
409 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
410 ((char *)instance->request_message_pool +
411 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
412
413 con_log(CL_ANN1, (CE_NOTE,
414 "request descriptor : 0x%08lx", (unsigned long)req_desc));
415
416 con_log(CL_ANN1, (CE_NOTE,
417 "request descriptor base phy : 0x%08lx",
418 (unsigned long)instance->request_message_pool_phy));
419
420 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
421 }
422
423
424 /*
425 * Allocate Request and Reply Queue Descriptors.
426 */
427 int
428 alloc_req_rep_desc(struct mrsas_instance *instance)
429 {
430 uint32_t request_q_sz, reply_q_sz;
431 int i, max_reply_q_sz;
432 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
433
434 /*
435 * ThunderBolt(TB) There's no longer producer consumer mechanism.
436 * Once we have an interrupt we are supposed to scan through the list of
437 * reply descriptors and process them accordingly. We would be needing
438 * to allocate memory for 1024 reply descriptors
439 */
440
441 /* Allocate Reply Descriptors */
442 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
443 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
444
445 /* reply queue size should be multiple of 16 */
446 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
447
448 reply_q_sz = 8 * max_reply_q_sz;
449
450
451 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
452 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
453
454 instance->reply_desc_dma_obj.size = reply_q_sz;
455 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
456 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
457 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
458 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
459 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
460
461 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
462 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
463 cmn_err(CE_WARN,
464 "mr_sas: could not alloc reply queue");
465 return (DDI_FAILURE);
466 }
467
468 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
469 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
470
471 /* virtual address of reply queue */
472 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
473 instance->reply_desc_dma_obj.buffer);
474
475 instance->reply_q_depth = max_reply_q_sz;
476
477 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
478 instance->reply_q_depth));
479
480 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
481 (void *)instance->reply_frame_pool));
482
483 /* initializing reply address to 0xFFFFFFFF */
484 reply_desc = instance->reply_frame_pool;
485
486 for (i = 0; i < instance->reply_q_depth; i++) {
487 reply_desc->Words = (uint64_t)~0;
488 reply_desc++;
489 }
490
491
492 instance->reply_frame_pool_phy =
493 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
494
495 con_log(CL_ANN1, (CE_NOTE,
496 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
497
498
499 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
500 reply_q_sz);
501
502 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
503 instance->reply_pool_limit_phy));
504
505
506 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
507 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
508
509 /* Allocate Request Descriptors */
510 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
511 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
512
513 request_q_sz = 8 *
514 (instance->max_fw_cmds);
515
516 instance->request_desc_dma_obj.size = request_q_sz;
517 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
518 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
519 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
520 0xFFFFFFFFU;
521 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
522 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
523
524 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
525 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
526 cmn_err(CE_WARN,
527 "mr_sas: could not alloc request queue desc");
528 goto fail_undo_reply_queue;
529 }
530
531 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
532 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
533
534 /* virtual address of request queue desc */
535 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
536 (instance->request_desc_dma_obj.buffer);
537
538 instance->request_message_pool_phy =
539 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
540
541 return (DDI_SUCCESS);
542
543 fail_undo_reply_queue:
544 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
545 (void) mrsas_free_dma_obj(instance,
546 instance->reply_desc_dma_obj);
547 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
548 }
549
550 return (DDI_FAILURE);
551 }
552
553 /*
554 * mrsas_alloc_cmd_pool_tbolt
555 *
556 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
557 * routine
558 */
559 int
560 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
561 {
562 int i;
563 int count;
564 uint32_t max_cmd;
565 uint32_t reserve_cmd;
566 size_t sz;
567
568 struct mrsas_cmd *cmd;
569
570 max_cmd = instance->max_fw_cmds;
571 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
572 "max_cmd %x", max_cmd));
573
574
575 sz = sizeof (struct mrsas_cmd *) * max_cmd;
576
577 /*
578 * instance->cmd_list is an array of struct mrsas_cmd pointers.
579 * Allocate the dynamic array first and then allocate individual
580 * commands.
581 */
582 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
583
584 /* create a frame pool and assign one frame to each cmd */
585 for (count = 0; count < max_cmd; count++) {
586 instance->cmd_list[count] =
587 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
588 }
589
590 /* add all the commands to command pool */
591
592 INIT_LIST_HEAD(&instance->cmd_pool_list);
593 INIT_LIST_HEAD(&instance->cmd_pend_list);
594 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
595
596 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
597
598 /* cmd index 0 reservered for IOC INIT */
599 for (i = 1; i < reserve_cmd; i++) {
600 cmd = instance->cmd_list[i];
601 cmd->index = i;
602 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
603 }
604
605
606 for (i = reserve_cmd; i < max_cmd; i++) {
607 cmd = instance->cmd_list[i];
608 cmd->index = i;
609 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
610 }
611
612 return (DDI_SUCCESS);
613
614 mrsas_undo_cmds:
615 if (count > 0) {
616 /* free each cmd */
617 for (i = 0; i < count; i++) {
618 if (instance->cmd_list[i] != NULL) {
619 kmem_free(instance->cmd_list[i],
620 sizeof (struct mrsas_cmd));
621 }
622 instance->cmd_list[i] = NULL;
623 }
624 }
625
626 mrsas_undo_cmd_list:
627 if (instance->cmd_list != NULL)
628 kmem_free(instance->cmd_list, sz);
629 instance->cmd_list = NULL;
630
631 return (DDI_FAILURE);
632 }
633
634
635 /*
636 * free_space_for_mpi2
637 */
638 void
639 free_space_for_mpi2(struct mrsas_instance *instance)
640 {
641 /* already freed */
642 if (instance->cmd_list == NULL) {
643 return;
644 }
645
646 /* First free the additional DMA buffer */
647 mrsas_tbolt_free_additional_dma_buffer(instance);
648
649 /* Free the request/reply descriptor pool */
650 free_req_rep_desc_pool(instance);
651
652 /* Free the MPI message pool */
653 destroy_mpi2_frame_pool(instance);
654
655 /* Free the MFI frame pool */
656 destroy_mfi_frame_pool(instance);
657
658 /* Free all the commands in the cmd_list */
659 /* Free the cmd_list buffer itself */
660 mrsas_free_cmd_pool(instance);
661 }
662
663
664 /*
665 * ThunderBolt(TB) memory allocations for commands/messages/frames.
666 */
667 int
668 alloc_space_for_mpi2(struct mrsas_instance *instance)
669 {
670 /* Allocate command pool (memory for cmd_list & individual commands) */
671 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
672 cmn_err(CE_WARN, "Error creating cmd pool");
673 return (DDI_FAILURE);
674 }
675
676 /* Initialize single reply size and Message size */
677 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
678 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
679
680 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
681 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
682 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
683 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
684 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
685
686 /* Reduce SG count by 1 to take care of group cmds feature in FW */
687 instance->max_num_sge = (instance->max_sge_in_main_msg +
688 instance->max_sge_in_chain - 2);
689 instance->chain_offset_mpt_msg =
690 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
691 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
692 sizeof (MPI2_SGE_IO_UNION)) / 16;
693 instance->reply_read_index = 0;
694
695
696 /* Allocate Request and Reply descriptors Array */
697 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
698 if (alloc_req_rep_desc(instance)) {
699 cmn_err(CE_WARN,
700 "Error, allocating memory for descripter-pool");
701 goto mpi2_undo_cmd_pool;
702 }
703 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
704 instance->request_message_pool_phy));
705
706
707 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
708 if (create_mfi_frame_pool(instance)) {
709 cmn_err(CE_WARN,
710 "Error, allocating memory for MFI frame-pool");
711 goto mpi2_undo_descripter_pool;
712 }
713
714
715 /* Allocate MPI2 Message pool */
716 /*
717 * Make sure the buffer is alligned to 256 for raid message packet
718 * create a io request pool and assign one frame to each cmd
719 */
720
721 if (create_mpi2_frame_pool(instance)) {
722 cmn_err(CE_WARN,
723 "Error, allocating memory for MPI2 Message-pool");
724 goto mpi2_undo_mfi_frame_pool;
725 }
726
727 #ifdef DEBUG
728 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
729 instance->max_sge_in_main_msg));
730 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
731 instance->max_sge_in_chain));
732 con_log(CL_ANN1, (CE_CONT,
733 "[max_sge]0x%x", instance->max_num_sge));
734 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
735 instance->chain_offset_mpt_msg));
736 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
737 instance->chain_offset_io_req));
738 #endif
739
740
741 /* Allocate additional dma buffer */
742 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
743 cmn_err(CE_WARN,
744 "Error, allocating tbolt additional DMA buffer");
745 goto mpi2_undo_message_pool;
746 }
747
748 return (DDI_SUCCESS);
749
750 mpi2_undo_message_pool:
751 destroy_mpi2_frame_pool(instance);
752
753 mpi2_undo_mfi_frame_pool:
754 destroy_mfi_frame_pool(instance);
755
756 mpi2_undo_descripter_pool:
757 free_req_rep_desc_pool(instance);
758
759 mpi2_undo_cmd_pool:
760 mrsas_free_cmd_pool(instance);
761
762 return (DDI_FAILURE);
763 }
764
765
766 /*
767 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
768 */
769 int
770 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
771 {
772
773 /*
774 * Reduce the max supported cmds by 1. This is to ensure that the
775 * reply_q_sz (1 more than the max cmd that driver may send)
776 * does not exceed max cmds that the FW can support
777 */
778
779 if (instance->max_fw_cmds > 1008) {
780 instance->max_fw_cmds = 1008;
781 instance->max_fw_cmds = instance->max_fw_cmds-1;
782 }
783
784 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
785 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
786
787
788 /* create a pool of commands */
789 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
790 cmn_err(CE_WARN,
791 " alloc_space_for_mpi2() failed.");
792
793 return (DDI_FAILURE);
794 }
795
796 /* Send ioc init message */
797 /* NOTE: the issue_init call does FMA checking already. */
798 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
799 cmn_err(CE_WARN,
800 " mrsas_issue_init_mpi2() failed.");
801
802 goto fail_init_fusion;
803 }
804
805 instance->unroll.alloc_space_mpi2 = 1;
806
807 con_log(CL_ANN, (CE_NOTE,
808 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
809
810 return (DDI_SUCCESS);
811
812 fail_init_fusion:
813 free_space_for_mpi2(instance);
814
815 return (DDI_FAILURE);
816 }
817
818
819
820 /*
821 * init_mpi2
822 */
823 int
824 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
825 {
826 dma_obj_t init2_dma_obj;
827 int ret_val = DDI_SUCCESS;
828
829 /* allocate DMA buffer for IOC INIT message */
830 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
831 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
832 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
833 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
834 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
835 init2_dma_obj.dma_attr.dma_attr_align = 256;
836
837 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
838 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
839 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
840 "could not allocate data transfer buffer.");
841 return (DDI_FAILURE);
842 }
843 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
844
845 con_log(CL_ANN1, (CE_NOTE,
846 "mrsas_issue_init_mpi2 _phys adr: %x",
847 init2_dma_obj.dma_cookie[0].dmac_address));
848
849
850 /* Initialize and send ioc init message */
851 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
852 if (ret_val == DDI_FAILURE) {
853 con_log(CL_ANN1, (CE_WARN,
854 "mrsas_issue_init_mpi2: Failed"));
855 goto fail_init_mpi2;
856 }
857
858 /* free IOC init DMA buffer */
859 if (mrsas_free_dma_obj(instance, init2_dma_obj)
860 != DDI_SUCCESS) {
861 con_log(CL_ANN1, (CE_WARN,
862 "mrsas_issue_init_mpi2: Free Failed"));
863 return (DDI_FAILURE);
864 }
865
866 /* Get/Check and sync ld_map info */
867 instance->map_id = 0;
868 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
869 (void) mrsas_tbolt_sync_map_info(instance);
870
871
872 /* No mrsas_cmd to send, so send NULL. */
873 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
874 goto fail_init_mpi2;
875
876 con_log(CL_ANN, (CE_NOTE,
877 "mrsas_issue_init_mpi2: SUCCESSFUL"));
878
879 return (DDI_SUCCESS);
880
881 fail_init_mpi2:
882 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
883
884 return (DDI_FAILURE);
885 }
886
887 static int
888 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
889 {
890 int numbytes;
891 uint16_t flags;
892 struct mrsas_init_frame2 *mfiFrameInit2;
893 struct mrsas_header *frame_hdr;
894 Mpi2IOCInitRequest_t *init;
895 struct mrsas_cmd *cmd = NULL;
896 struct mrsas_drv_ver drv_ver_info;
897 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
898
899 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
900
901
902 #ifdef DEBUG
903 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
904 (int)sizeof (*mfiFrameInit2)));
905 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
906 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
907 (int)sizeof (struct mrsas_init_frame2)));
908 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
909 (int)sizeof (Mpi2IOCInitRequest_t)));
910 #endif
911
912 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
913 numbytes = sizeof (*init);
914 bzero(init, numbytes);
915
916 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
917 MPI2_FUNCTION_IOC_INIT);
918
919 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
920 MPI2_WHOINIT_HOST_DRIVER);
921
922 /* set MsgVersion and HeaderVersion host driver was built with */
923 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
924 MPI2_VERSION);
925
926 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
927 MPI2_HEADER_VERSION);
928
929 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
930 instance->raid_io_msg_size / 4);
931
932 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
933 0);
934
935 ddi_put16(mpi2_dma_obj->acc_handle,
936 &init->ReplyDescriptorPostQueueDepth,
937 instance->reply_q_depth);
938 /*
939 * These addresses are set using the DMA cookie addresses from when the
940 * memory was allocated. Sense buffer hi address should be 0.
941 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
942 */
943
944 ddi_put32(mpi2_dma_obj->acc_handle,
945 &init->SenseBufferAddressHigh, 0);
946
947 ddi_put64(mpi2_dma_obj->acc_handle,
948 (uint64_t *)&init->SystemRequestFrameBaseAddress,
949 instance->io_request_frames_phy);
950
951 ddi_put64(mpi2_dma_obj->acc_handle,
952 &init->ReplyDescriptorPostQueueAddress,
953 instance->reply_frame_pool_phy);
954
955 ddi_put64(mpi2_dma_obj->acc_handle,
956 &init->ReplyFreeQueueAddress, 0);
957
958 cmd = instance->cmd_list[0];
959 if (cmd == NULL) {
960 return (DDI_FAILURE);
961 }
962 cmd->retry_count_for_ocr = 0;
963 cmd->pkt = NULL;
964 cmd->drv_pkt_time = 0;
965
966 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
967 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
968
969 frame_hdr = &cmd->frame->hdr;
970
971 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
972 MFI_CMD_STATUS_POLL_MODE);
973
974 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
975
976 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
977
978 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
979
980 con_log(CL_ANN, (CE_CONT,
981 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
982
983 /* Init the MFI Header */
984 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
985 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
986
987 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
988
989 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
990 &mfiFrameInit2->cmd_status,
991 MFI_STAT_INVALID_STATUS);
992
993 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
994
995 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
996 &mfiFrameInit2->queue_info_new_phys_addr_lo,
997 mpi2_dma_obj->dma_cookie[0].dmac_address);
998
999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1000 &mfiFrameInit2->data_xfer_len,
1001 sizeof (Mpi2IOCInitRequest_t));
1002
1003 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1004 (int)init->ReplyDescriptorPostQueueAddress));
1005
1006 /* fill driver version information */
1007 fill_up_drv_ver(&drv_ver_info);
1008
1009 /* allocate the driver version data transfer buffer */
1010 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1011 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1013 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1015 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1016
1017 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1018 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1019 cmn_err(CE_WARN,
1020 "fusion init: Could not allocate driver version buffer.");
1021 return (DDI_FAILURE);
1022 }
1023 /* copy driver version to dma buffer */
1024 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1025 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1026 (uint8_t *)drv_ver_info.drv_ver,
1027 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1028 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1029
1030 /* send driver version physical address to firmware */
1031 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1032 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1033
1034 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1035 mfiFrameInit2->queue_info_new_phys_addr_lo,
1036 (int)sizeof (Mpi2IOCInitRequest_t)));
1037
1038 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1039
1040 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1041 cmd->scsi_io_request_phys_addr,
1042 (int)sizeof (struct mrsas_init_frame2)));
1043
1044 /* disable interrupts before sending INIT2 frame */
1045 instance->func_ptr->disable_intr(instance);
1046
1047 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1048 instance->request_message_pool;
1049 req_desc->Words = cmd->scsi_io_request_phys_addr;
1050 req_desc->MFAIo.RequestFlags =
1051 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1052
1053 cmd->request_desc = req_desc;
1054
1055 /* issue the init frame */
1056 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1057
1058 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1059 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1060 frame_hdr->cmd_status));
1061
1062 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1063 &mfiFrameInit2->cmd_status) == 0) {
1064 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1065 } else {
1066 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1067 mrsas_dump_reply_desc(instance);
1068 goto fail_ioc_init;
1069 }
1070
1071 mrsas_dump_reply_desc(instance);
1072
1073 instance->unroll.verBuff = 1;
1074
1075 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1076
1077 return (DDI_SUCCESS);
1078
1079
1080 fail_ioc_init:
1081
1082 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1083
1084 return (DDI_FAILURE);
1085 }
1086
1087 int
1088 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1089 {
1090 int i;
1091 uint32_t wait_time = dump_io_wait_time;
1092 for (i = 0; i < wait_time; i++) {
1093 /*
1094 * Check For Outstanding poll Commands
1095 * except ldsync command and aen command
1096 */
1097 if (instance->fw_outstanding <= 2) {
1098 break;
1099 }
1100 drv_usecwait(10*MILLISEC);
1101 /* complete commands from reply queue */
1102 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1103 }
1104 if (instance->fw_outstanding > 2) {
1105 return (1);
1106 }
1107 return (0);
1108 }
1109 /*
1110 * scsi_pkt handling
1111 *
1112 * Visible to the external world via the transport structure.
1113 */
1114
1115 int
1116 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1117 {
1118 struct mrsas_instance *instance = ADDR2MR(ap);
1119 struct scsa_cmd *acmd = PKT2CMD(pkt);
1120 struct mrsas_cmd *cmd = NULL;
1121 uchar_t cmd_done = 0;
1122
1123 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1124 if (instance->deadadapter == 1) {
1125 cmn_err(CE_WARN,
1126 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1127 "for IO, as the HBA doesnt take any more IOs");
1128 if (pkt) {
1129 pkt->pkt_reason = CMD_DEV_GONE;
1130 pkt->pkt_statistics = STAT_DISCON;
1131 }
1132 return (TRAN_FATAL_ERROR);
1133 }
1134 if (instance->adapterresetinprogress) {
1135 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1136 "returning mfi_pkt and setting TRAN_BUSY\n"));
1137 return (TRAN_BUSY);
1138 }
1139 (void) mrsas_tbolt_prepare_pkt(acmd);
1140
1141 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1142
1143 /*
1144 * Check if the command is already completed by the mrsas_build_cmd()
1145 * routine. In which case the busy_flag would be clear and scb will be
1146 * NULL and appropriate reason provided in pkt_reason field
1147 */
1148 if (cmd_done) {
1149 pkt->pkt_reason = CMD_CMPLT;
1150 pkt->pkt_scbp[0] = STATUS_GOOD;
1151 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1152 | STATE_SENT_CMD;
1153 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1154 (*pkt->pkt_comp)(pkt);
1155 }
1156
1157 return (TRAN_ACCEPT);
1158 }
1159
1160 if (cmd == NULL) {
1161 return (TRAN_BUSY);
1162 }
1163
1164
1165 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1166 if (instance->fw_outstanding > instance->max_fw_cmds) {
1167 cmn_err(CE_WARN,
1168 "Command Queue Full... Returning BUSY");
1169 return_raid_msg_pkt(instance, cmd);
1170 return (TRAN_BUSY);
1171 }
1172
1173 /* Synchronize the Cmd frame for the controller */
1174 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1175 DDI_DMA_SYNC_FORDEV);
1176
1177 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1178 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1179 cmd->index, cmd->SMID));
1180
1181 instance->func_ptr->issue_cmd(cmd, instance);
1182 } else {
1183 instance->func_ptr->issue_cmd(cmd, instance);
1184 (void) wait_for_outstanding_poll_io(instance);
1185 (void) mrsas_common_check(instance, cmd);
1186 }
1187
1188 return (TRAN_ACCEPT);
1189 }
1190
1191 /*
1192 * prepare the pkt:
1193 * the pkt may have been resubmitted or just reused so
1194 * initialize some fields and do some checks.
1195 */
1196 static int
1197 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1198 {
1199 struct scsi_pkt *pkt = CMD2PKT(acmd);
1200
1201
1202 /*
1203 * Reinitialize some fields that need it; the packet may
1204 * have been resubmitted
1205 */
1206 pkt->pkt_reason = CMD_CMPLT;
1207 pkt->pkt_state = 0;
1208 pkt->pkt_statistics = 0;
1209 pkt->pkt_resid = 0;
1210
1211 /*
1212 * zero status byte.
1213 */
1214 *(pkt->pkt_scbp) = 0;
1215
1216 return (0);
1217 }
1218
1219
1220 int
1221 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1222 struct scsa_cmd *acmd,
1223 struct mrsas_cmd *cmd,
1224 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1225 uint32_t *datalen)
1226 {
1227 uint32_t MaxSGEs;
1228 int sg_to_process;
1229 uint32_t i, j;
1230 uint32_t numElements, endElement;
1231 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1232 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1233 ddi_acc_handle_t acc_handle =
1234 instance->mpi2_frame_pool_dma_obj.acc_handle;
1235 uint16_t devid = instance->device_id;
1236
1237 con_log(CL_ANN1, (CE_NOTE,
1238 "chkpnt: Building Chained SGL :%d", __LINE__));
1239
1240 /* Calulate SGE size in number of Words(32bit) */
1241 /* Clear the datalen before updating it. */
1242 *datalen = 0;
1243
1244 MaxSGEs = instance->max_sge_in_main_msg;
1245
1246 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1247 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1248
1249 /* set data transfer flag. */
1250 if (acmd->cmd_flags & CFLAG_DMASEND) {
1251 ddi_put32(acc_handle, &scsi_raid_io->Control,
1252 MPI2_SCSIIO_CONTROL_WRITE);
1253 } else {
1254 ddi_put32(acc_handle, &scsi_raid_io->Control,
1255 MPI2_SCSIIO_CONTROL_READ);
1256 }
1257
1258
1259 numElements = acmd->cmd_cookiecnt;
1260
1261 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1262
1263 if (numElements > instance->max_num_sge) {
1264 con_log(CL_ANN, (CE_NOTE,
1265 "[Max SGE Count Exceeded]:%x", numElements));
1266 return (numElements);
1267 }
1268
1269 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1270 (uint8_t)numElements);
1271
1272 /* set end element in main message frame */
1273 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1274
1275 /* prepare the scatter-gather list for the firmware */
1276 scsi_raid_io_sgl_ieee =
1277 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1278
1279 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1280 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1281 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1282 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1283
1284 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1285 }
1286
1287 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1288 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1289 acmd->cmd_dmacookies[i].dmac_laddress);
1290
1291 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1292 acmd->cmd_dmacookies[i].dmac_size);
1293
1294 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1295
1296 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1297 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1298 if (i == (numElements - 1)) {
1299 ddi_put8(acc_handle,
1300 &scsi_raid_io_sgl_ieee->Flags,
1301 IEEE_SGE_FLAGS_END_OF_LIST);
1302 }
1303 }
1304
1305 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1306
1307 #ifdef DEBUG
1308 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1309 scsi_raid_io_sgl_ieee->Address));
1310 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1311 scsi_raid_io_sgl_ieee->Length));
1312 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1313 scsi_raid_io_sgl_ieee->Flags));
1314 #endif
1315
1316 }
1317
1318 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1319
1320 /* check if chained SGL required */
1321 if (i < numElements) {
1322
1323 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1324
1325 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1326 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1327 uint16_t ioFlags =
1328 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1329
1330 if ((ioFlags &
1331 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1332 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1333 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1334 (U8)instance->chain_offset_io_req);
1335 } else {
1336 ddi_put8(acc_handle,
1337 &scsi_raid_io->ChainOffset, 0);
1338 }
1339 } else {
1340 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1341 (U8)instance->chain_offset_io_req);
1342 }
1343
1344 /* prepare physical chain element */
1345 ieeeChainElement = scsi_raid_io_sgl_ieee;
1346
1347 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1348
1349 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1350 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1351 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1352 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1353 } else {
1354 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1355 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1356 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1357 }
1358
1359 ddi_put32(acc_handle, &ieeeChainElement->Length,
1360 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1361
1362 ddi_put64(acc_handle, &ieeeChainElement->Address,
1363 (U64)cmd->sgl_phys_addr);
1364
1365 sg_to_process = numElements - i;
1366
1367 con_log(CL_ANN1, (CE_NOTE,
1368 "[Additional SGE Count]:%x", endElement));
1369
1370 /* point to the chained SGL buffer */
1371 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1372
1373 /* build rest of the SGL in chained buffer */
1374 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1375 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1376
1377 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1378 acmd->cmd_dmacookies[i].dmac_laddress);
1379
1380 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1381 acmd->cmd_dmacookies[i].dmac_size);
1382
1383 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1384
1385 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1386 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1387 if (i == (numElements - 1)) {
1388 ddi_put8(acc_handle,
1389 &scsi_raid_io_sgl_ieee->Flags,
1390 IEEE_SGE_FLAGS_END_OF_LIST);
1391 }
1392 }
1393
1394 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1395
1396 #if DEBUG
1397 con_log(CL_DLEVEL1, (CE_NOTE,
1398 "[SGL Address]: %" PRIx64,
1399 scsi_raid_io_sgl_ieee->Address));
1400 con_log(CL_DLEVEL1, (CE_NOTE,
1401 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1402 con_log(CL_DLEVEL1, (CE_NOTE,
1403 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1404 #endif
1405
1406 i++;
1407 }
1408 }
1409
1410 return (0);
1411 } /*end of BuildScatterGather */
1412
1413
1414 /*
1415 * build_cmd
1416 */
1417 static struct mrsas_cmd *
1418 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1419 struct scsi_pkt *pkt, uchar_t *cmd_done)
1420 {
1421 uint8_t fp_possible = 0;
1422 uint32_t index;
1423 uint32_t lba_count = 0;
1424 uint32_t start_lba_hi = 0;
1425 uint32_t start_lba_lo = 0;
1426 uint16_t devid = instance->device_id;
1427 ddi_acc_handle_t acc_handle =
1428 instance->mpi2_frame_pool_dma_obj.acc_handle;
1429 struct mrsas_cmd *cmd = NULL;
1430 struct scsa_cmd *acmd = PKT2CMD(pkt);
1431 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1432 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1433 uint32_t datalen;
1434 struct IO_REQUEST_INFO io_info;
1435 MR_FW_RAID_MAP_ALL *local_map_ptr;
1436 uint16_t pd_cmd_cdblen;
1437
1438 con_log(CL_DLEVEL1, (CE_NOTE,
1439 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1440
1441 /* find out if this is logical or physical drive command. */
1442 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1443 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1444
1445 *cmd_done = 0;
1446
1447 /* get the command packet */
1448 if (!(cmd = get_raid_msg_pkt(instance))) {
1449 return (NULL);
1450 }
1451
1452 index = cmd->index;
1453 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1454 ReqDescUnion->Words = 0;
1455 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1456 ReqDescUnion->SCSIIO.RequestFlags =
1457 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1458 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1459
1460
1461 cmd->request_desc = ReqDescUnion;
1462 cmd->pkt = pkt;
1463 cmd->cmd = acmd;
1464
1465 /* lets get the command directions */
1466 if (acmd->cmd_flags & CFLAG_DMASEND) {
1467 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1468 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1469 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1470 DDI_DMA_SYNC_FORDEV);
1471 }
1472 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1473 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1474 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1475 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1476 DDI_DMA_SYNC_FORCPU);
1477 }
1478 } else {
1479 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1480 }
1481
1482
1483 /* get SCSI_IO raid message frame pointer */
1484 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1485
1486 /* zero out SCSI_IO raid message frame */
1487 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1488
1489 /* Set the ldTargetId set by BuildRaidContext() */
1490 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1491 acmd->device_id);
1492
1493 /* Copy CDB to scsi_io_request message frame */
1494 ddi_rep_put8(acc_handle,
1495 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1496 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1497
1498 /*
1499 * Just the CDB length, rest of the Flags are zero
1500 * This will be modified later.
1501 */
1502 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1503
1504 pd_cmd_cdblen = acmd->cmd_cdblen;
1505
1506 switch (pkt->pkt_cdbp[0]) {
1507 case SCMD_READ:
1508 case SCMD_WRITE:
1509 case SCMD_READ_G1:
1510 case SCMD_WRITE_G1:
1511 case SCMD_READ_G4:
1512 case SCMD_WRITE_G4:
1513 case SCMD_READ_G5:
1514 case SCMD_WRITE_G5:
1515
1516 if (acmd->islogical) {
1517 /* Initialize sense Information */
1518 if (cmd->sense1 == NULL) {
1519 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1520 "Sense buffer ptr NULL "));
1521 }
1522 bzero(cmd->sense1, SENSE_LENGTH);
1523 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1524 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1525
1526 if (acmd->cmd_cdblen == CDB_GROUP0) {
1527 /* 6-byte cdb */
1528 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1529 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1530 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1531 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1532 << 16));
1533 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1534 /* 10-byte cdb */
1535 lba_count =
1536 (((uint16_t)(pkt->pkt_cdbp[8])) |
1537 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1538
1539 start_lba_lo =
1540 (((uint32_t)(pkt->pkt_cdbp[5])) |
1541 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1542 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1543 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1544
1545 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1546 /* 12-byte cdb */
1547 lba_count = (
1548 ((uint32_t)(pkt->pkt_cdbp[9])) |
1549 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1550 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1551 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1552
1553 start_lba_lo =
1554 (((uint32_t)(pkt->pkt_cdbp[5])) |
1555 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1556 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1557 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1558
1559 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1560 /* 16-byte cdb */
1561 lba_count = (
1562 ((uint32_t)(pkt->pkt_cdbp[13])) |
1563 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1564 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1565 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1566
1567 start_lba_lo = (
1568 ((uint32_t)(pkt->pkt_cdbp[9])) |
1569 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1570 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1571 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1572
1573 start_lba_hi = (
1574 ((uint32_t)(pkt->pkt_cdbp[5])) |
1575 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1576 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1577 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1578 }
1579
1580 if (instance->tbolt &&
1581 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1582 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1583 "controller limit 0x%x sectors",
1584 lba_count);
1585 }
1586
1587 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1588 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1589 start_lba_lo;
1590 io_info.numBlocks = lba_count;
1591 io_info.ldTgtId = acmd->device_id;
1592
1593 if (acmd->cmd_flags & CFLAG_DMASEND)
1594 io_info.isRead = 0;
1595 else
1596 io_info.isRead = 1;
1597
1598
1599 /* Acquire SYNC MAP UPDATE lock */
1600 mutex_enter(&instance->sync_map_mtx);
1601
1602 local_map_ptr =
1603 instance->ld_map[(instance->map_id & 1)];
1604
1605 if ((MR_TargetIdToLdGet(
1606 acmd->device_id, local_map_ptr) >=
1607 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1608 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1609 "targetId >= MAX_LOGICAL_DRIVES || "
1610 "!instance->fast_path_io");
1611 fp_possible = 0;
1612 /* Set Regionlock flags to BYPASS */
1613 /* io_request->RaidContext.regLockFlags = 0; */
1614 ddi_put8(acc_handle,
1615 &scsi_raid_io->RaidContext.regLockFlags, 0);
1616 } else {
1617 if (MR_BuildRaidContext(instance, &io_info,
1618 &scsi_raid_io->RaidContext, local_map_ptr))
1619 fp_possible = io_info.fpOkForIo;
1620 }
1621
1622 if (!enable_fp)
1623 fp_possible = 0;
1624
1625 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1626 "instance->fast_path_io %d fp_possible %d",
1627 enable_fp, instance->fast_path_io, fp_possible));
1628
1629 if (fp_possible) {
1630
1631 /* Check for DIF enabled LD */
1632 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1633 /* Prepare 32 Byte CDB for DIF capable Disk */
1634 mrsas_tbolt_prepare_cdb(instance,
1635 scsi_raid_io->CDB.CDB32,
1636 &io_info, scsi_raid_io, start_lba_lo);
1637 } else {
1638 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1639 (uint8_t *)&pd_cmd_cdblen,
1640 io_info.pdBlock, io_info.numBlocks);
1641 ddi_put16(acc_handle,
1642 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1643 }
1644
1645 ddi_put8(acc_handle, &scsi_raid_io->Function,
1646 MPI2_FUNCTION_SCSI_IO_REQUEST);
1647
1648 ReqDescUnion->SCSIIO.RequestFlags =
1649 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1650 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1651
1652 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1653 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1654 uint8_t regLockFlags = ddi_get8(acc_handle,
1655 &scsi_raid_io->RaidContext.regLockFlags);
1656 uint16_t IoFlags = ddi_get16(acc_handle,
1657 &scsi_raid_io->IoFlags);
1658
1659 if (regLockFlags == REGION_TYPE_UNUSED)
1660 ReqDescUnion->SCSIIO.RequestFlags =
1661 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1662 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663
1664 IoFlags |=
1665 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1666 regLockFlags |=
1667 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1668 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1669
1670 ddi_put8(acc_handle,
1671 &scsi_raid_io->ChainOffset, 0);
1672 ddi_put8(acc_handle,
1673 &scsi_raid_io->RaidContext.nsegType,
1674 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1675 MPI2_TYPE_CUDA));
1676 ddi_put8(acc_handle,
1677 &scsi_raid_io->RaidContext.regLockFlags,
1678 regLockFlags);
1679 ddi_put16(acc_handle,
1680 &scsi_raid_io->IoFlags, IoFlags);
1681 }
1682
1683 if ((instance->load_balance_info[
1684 acmd->device_id].loadBalanceFlag) &&
1685 (io_info.isRead)) {
1686 io_info.devHandle =
1687 get_updated_dev_handle(&instance->
1688 load_balance_info[acmd->device_id],
1689 &io_info);
1690 cmd->load_balance_flag |=
1691 MEGASAS_LOAD_BALANCE_FLAG;
1692 } else {
1693 cmd->load_balance_flag &=
1694 ~MEGASAS_LOAD_BALANCE_FLAG;
1695 }
1696
1697 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1698 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1699 io_info.devHandle);
1700
1701 } else {
1702 ddi_put8(acc_handle, &scsi_raid_io->Function,
1703 MPI2_FUNCTION_LD_IO_REQUEST);
1704
1705 ddi_put16(acc_handle,
1706 &scsi_raid_io->DevHandle, acmd->device_id);
1707
1708 ReqDescUnion->SCSIIO.RequestFlags =
1709 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1710 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1711
1712 ddi_put16(acc_handle,
1713 &scsi_raid_io->RaidContext.timeoutValue,
1714 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1715
1716 if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1717 (devid == PCI_DEVICE_ID_LSI_FURY)) {
1718 uint8_t regLockFlags = ddi_get8(acc_handle,
1719 &scsi_raid_io->RaidContext.regLockFlags);
1720
1721 if (regLockFlags == REGION_TYPE_UNUSED) {
1722 ReqDescUnion->SCSIIO.RequestFlags =
1723 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1724 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1725 }
1726
1727 regLockFlags |=
1728 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1729 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1730
1731 ddi_put8(acc_handle,
1732 &scsi_raid_io->RaidContext.nsegType,
1733 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1734 MPI2_TYPE_CUDA));
1735 ddi_put8(acc_handle,
1736 &scsi_raid_io->RaidContext.regLockFlags,
1737 regLockFlags);
1738 }
1739 } /* Not FP */
1740
1741 /* Release SYNC MAP UPDATE lock */
1742 mutex_exit(&instance->sync_map_mtx);
1743
1744
1745 /*
1746 * Set sense buffer physical address/length in scsi_io_request.
1747 */
1748 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1749 cmd->sense_phys_addr1);
1750 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1751 SENSE_LENGTH);
1752
1753 /* Construct SGL */
1754 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1755 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1756
1757 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1758 scsi_raid_io, &datalen);
1759
1760 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1761
1762 break;
1763 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1764 } else {
1765 break;
1766 #endif
1767 }
1768 /* fall through For all non-rd/wr cmds */
1769 default:
1770 switch (pkt->pkt_cdbp[0]) {
1771 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1772 return_raid_msg_pkt(instance, cmd);
1773 *cmd_done = 1;
1774 return (NULL);
1775 }
1776
1777 case SCMD_MODE_SENSE:
1778 case SCMD_MODE_SENSE_G1: {
1779 union scsi_cdb *cdbp;
1780 uint16_t page_code;
1781
1782 cdbp = (void *)pkt->pkt_cdbp;
1783 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1784 switch (page_code) {
1785 case 0x3:
1786 case 0x4:
1787 (void) mrsas_mode_sense_build(pkt);
1788 return_raid_msg_pkt(instance, cmd);
1789 *cmd_done = 1;
1790 return (NULL);
1791 }
1792 break;
1793 }
1794
1795 default: {
1796 /*
1797 * Here we need to handle PASSTHRU for
1798 * Logical Devices. Like Inquiry etc.
1799 */
1800
1801 if (!(acmd->islogical)) {
1802
1803 /* Acquire SYNC MAP UPDATE lock */
1804 mutex_enter(&instance->sync_map_mtx);
1805
1806 local_map_ptr =
1807 instance->ld_map[(instance->map_id & 1)];
1808
1809 ddi_put8(acc_handle, &scsi_raid_io->Function,
1810 MPI2_FUNCTION_SCSI_IO_REQUEST);
1811
1812 ReqDescUnion->SCSIIO.RequestFlags =
1813 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1814 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1815
1816 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1817 local_map_ptr->raidMap.
1818 devHndlInfo[acmd->device_id].curDevHdl);
1819
1820
1821 /* Set regLockFlasgs to REGION_TYPE_BYPASS */
1822 ddi_put8(acc_handle,
1823 &scsi_raid_io->RaidContext.regLockFlags, 0);
1824 ddi_put64(acc_handle,
1825 &scsi_raid_io->RaidContext.regLockRowLBA,
1826 0);
1827 ddi_put32(acc_handle,
1828 &scsi_raid_io->RaidContext.regLockLength,
1829 0);
1830 ddi_put8(acc_handle,
1831 &scsi_raid_io->RaidContext.RAIDFlags,
1832 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1833 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1834 ddi_put16(acc_handle,
1835 &scsi_raid_io->RaidContext.timeoutValue,
1836 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1837 ddi_put16(acc_handle,
1838 &scsi_raid_io->RaidContext.ldTargetId,
1839 acmd->device_id);
1840 ddi_put8(acc_handle,
1841 &scsi_raid_io->LUN[1], acmd->lun);
1842
1843 /* Release SYNC MAP UPDATE lock */
1844 mutex_exit(&instance->sync_map_mtx);
1845
1846 } else {
1847 ddi_put8(acc_handle, &scsi_raid_io->Function,
1848 MPI2_FUNCTION_LD_IO_REQUEST);
1849 ddi_put8(acc_handle,
1850 &scsi_raid_io->LUN[1], acmd->lun);
1851 ddi_put16(acc_handle,
1852 &scsi_raid_io->DevHandle, acmd->device_id);
1853 ReqDescUnion->SCSIIO.RequestFlags =
1854 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1855 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1856 }
1857
1858 /*
1859 * Set sense buffer physical address/length in
1860 * scsi_io_request.
1861 */
1862 ddi_put32(acc_handle,
1863 &scsi_raid_io->SenseBufferLowAddress,
1864 cmd->sense_phys_addr1);
1865 ddi_put8(acc_handle,
1866 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1867
1868 /* Construct SGL */
1869 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1870 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1871
1872 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1873 scsi_raid_io, &datalen);
1874
1875 ddi_put32(acc_handle,
1876 &scsi_raid_io->DataLength, datalen);
1877
1878
1879 con_log(CL_ANN, (CE_CONT,
1880 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1881 pkt->pkt_cdbp[0], acmd->device_id));
1882 con_log(CL_DLEVEL1, (CE_CONT,
1883 "data length = %x\n",
1884 scsi_raid_io->DataLength));
1885 con_log(CL_DLEVEL1, (CE_CONT,
1886 "cdb length = %x\n",
1887 acmd->cmd_cdblen));
1888 }
1889 break;
1890 }
1891
1892 }
1893
1894 return (cmd);
1895 }
1896
1897 uint32_t
1898 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1899 {
1900 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1901 }
1902
1903 void
1904 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1905 {
1906 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1907 atomic_inc_16(&instance->fw_outstanding);
1908
1909 struct scsi_pkt *pkt;
1910
1911 con_log(CL_ANN1,
1912 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1913
1914 con_log(CL_DLEVEL1, (CE_CONT,
1915 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1916 con_log(CL_DLEVEL1, (CE_CONT,
1917 " [req desc low part] %x \n",
1918 (uint_t)(req_desc->Words & 0xffffffffff)));
1919 con_log(CL_DLEVEL1, (CE_CONT,
1920 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1921 pkt = cmd->pkt;
1922
1923 if (pkt) {
1924 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1925 "ISSUED CMD TO FW : called : cmd:"
1926 ": %p instance : %p pkt : %p pkt_time : %x\n",
1927 gethrtime(), (void *)cmd, (void *)instance,
1928 (void *)pkt, cmd->drv_pkt_time));
1929 if (instance->adapterresetinprogress) {
1930 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1931 con_log(CL_ANN, (CE_NOTE,
1932 "TBOLT Reset the scsi_pkt timer"));
1933 } else {
1934 push_pending_mfi_pkt(instance, cmd);
1935 }
1936
1937 } else {
1938 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1939 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1940 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1941 }
1942
1943 /* Issue the command to the FW */
1944 mutex_enter(&instance->reg_write_mtx);
1945 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1946 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1947 mutex_exit(&instance->reg_write_mtx);
1948 }
1949
1950 /*
1951 * issue_cmd_in_sync_mode
1952 */
1953 int
1954 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1955 struct mrsas_cmd *cmd)
1956 {
1957 int i;
1958 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1959 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1960
1961 struct mrsas_header *hdr;
1962 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1963
1964 con_log(CL_ANN,
1965 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1966 cmd->SMID));
1967
1968
1969 if (instance->adapterresetinprogress) {
1970 cmd->drv_pkt_time = ddi_get16
1971 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1972 if (cmd->drv_pkt_time < debug_timeout_g)
1973 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1974 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1975 "RESET-IN-PROGRESS, issue cmd & return."));
1976
1977 mutex_enter(&instance->reg_write_mtx);
1978 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1979 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1980 mutex_exit(&instance->reg_write_mtx);
1981
1982 return (DDI_SUCCESS);
1983 } else {
1984 con_log(CL_ANN1, (CE_NOTE,
1985 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1986 push_pending_mfi_pkt(instance, cmd);
1987 }
1988
1989 con_log(CL_DLEVEL2, (CE_NOTE,
1990 "HighQport offset :%p",
1991 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1992 con_log(CL_DLEVEL2, (CE_NOTE,
1993 "LowQport offset :%p",
1994 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1995
1996 cmd->sync_cmd = MRSAS_TRUE;
1997 cmd->cmd_status = ENODATA;
1998
1999
2000 mutex_enter(&instance->reg_write_mtx);
2001 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2002 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2003 mutex_exit(&instance->reg_write_mtx);
2004
2005 con_log(CL_ANN1, (CE_NOTE,
2006 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2007 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2008 (uint_t)(req_desc->Words & 0xffffffff)));
2009
2010 mutex_enter(&instance->int_cmd_mtx);
2011 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2012 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2013 }
2014 mutex_exit(&instance->int_cmd_mtx);
2015
2016
2017 if (i < (msecs -1)) {
2018 return (DDI_SUCCESS);
2019 } else {
2020 return (DDI_FAILURE);
2021 }
2022 }
2023
2024 /*
2025 * issue_cmd_in_poll_mode
2026 */
2027 int
2028 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2029 struct mrsas_cmd *cmd)
2030 {
2031 int i;
2032 uint16_t flags;
2033 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2034 struct mrsas_header *frame_hdr;
2035
2036 con_log(CL_ANN,
2037 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2038 cmd->SMID));
2039
2040 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2041
2042 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2043 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2044 MFI_CMD_STATUS_POLL_MODE);
2045 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2046 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2047 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2048
2049 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2050 (uint_t)(req_desc->Words & 0xffffffff)));
2051 con_log(CL_ANN1, (CE_NOTE,
2052 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2053
2054 /* issue the frame using inbound queue port */
2055 mutex_enter(&instance->reg_write_mtx);
2056 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2057 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2058 mutex_exit(&instance->reg_write_mtx);
2059
2060 for (i = 0; i < msecs && (
2061 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2062 == MFI_CMD_STATUS_POLL_MODE); i++) {
2063 /* wait for cmd_status to change from 0xFF */
2064 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2065 }
2066
2067 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2068 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2069 con_log(CL_ANN1, (CE_NOTE,
2070 " cmd failed %" PRIx64, (req_desc->Words)));
2071 return (DDI_FAILURE);
2072 }
2073
2074 return (DDI_SUCCESS);
2075 }
2076
2077 void
2078 tbolt_enable_intr(struct mrsas_instance *instance)
2079 {
2080 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2081 /* writel(~0, ®s->outbound_intr_status); */
2082 /* readl(®s->outbound_intr_status); */
2083
2084 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2085
2086 /* dummy read to force PCI flush */
2087 (void) RD_OB_INTR_MASK(instance);
2088
2089 }
2090
2091 void
2092 tbolt_disable_intr(struct mrsas_instance *instance)
2093 {
2094 uint32_t mask = 0xFFFFFFFF;
2095
2096 WR_OB_INTR_MASK(mask, instance);
2097
2098 /* Dummy readl to force pci flush */
2099
2100 (void) RD_OB_INTR_MASK(instance);
2101 }
2102
2103
2104 int
2105 tbolt_intr_ack(struct mrsas_instance *instance)
2106 {
2107 uint32_t status;
2108
2109 /* check if it is our interrupt */
2110 status = RD_OB_INTR_STATUS(instance);
2111 con_log(CL_ANN1, (CE_NOTE,
2112 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2113
2114 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2115 return (DDI_INTR_UNCLAIMED);
2116 }
2117
2118 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2119 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2120 return (DDI_INTR_UNCLAIMED);
2121 }
2122
2123 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2124 /* clear the interrupt by writing back the same value */
2125 WR_OB_INTR_STATUS(status, instance);
2126 /* dummy READ */
2127 (void) RD_OB_INTR_STATUS(instance);
2128 }
2129 return (DDI_INTR_CLAIMED);
2130 }
2131
2132 /*
2133 * get_raid_msg_pkt : Get a command from the free pool
2134 * After successful allocation, the caller of this routine
2135 * must clear the frame buffer (memset to zero) before
2136 * using the packet further.
2137 *
2138 * ***** Note *****
2139 * After clearing the frame buffer the context id of the
2140 * frame buffer SHOULD be restored back.
2141 */
2142
2143 struct mrsas_cmd *
2144 get_raid_msg_pkt(struct mrsas_instance *instance)
2145 {
2146 mlist_t *head = &instance->cmd_pool_list;
2147 struct mrsas_cmd *cmd = NULL;
2148
2149 mutex_enter(&instance->cmd_pool_mtx);
2150 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2151
2152
2153 if (!mlist_empty(head)) {
2154 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2155 mlist_del_init(head->next);
2156 }
2157 if (cmd != NULL) {
2158 cmd->pkt = NULL;
2159 cmd->retry_count_for_ocr = 0;
2160 cmd->drv_pkt_time = 0;
2161 }
2162 mutex_exit(&instance->cmd_pool_mtx);
2163
2164 if (cmd != NULL)
2165 bzero(cmd->scsi_io_request,
2166 sizeof (Mpi2RaidSCSIIORequest_t));
2167 return (cmd);
2168 }
2169
2170 struct mrsas_cmd *
2171 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2172 {
2173 mlist_t *head = &instance->cmd_app_pool_list;
2174 struct mrsas_cmd *cmd = NULL;
2175
2176 mutex_enter(&instance->cmd_app_pool_mtx);
2177 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2178
2179 if (!mlist_empty(head)) {
2180 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2181 mlist_del_init(head->next);
2182 }
2183 if (cmd != NULL) {
2184 cmd->retry_count_for_ocr = 0;
2185 cmd->drv_pkt_time = 0;
2186 cmd->pkt = NULL;
2187 cmd->request_desc = NULL;
2188
2189 }
2190
2191 mutex_exit(&instance->cmd_app_pool_mtx);
2192
2193 if (cmd != NULL) {
2194 bzero(cmd->scsi_io_request,
2195 sizeof (Mpi2RaidSCSIIORequest_t));
2196 }
2197
2198 return (cmd);
2199 }
2200
2201 /*
2202 * return_raid_msg_pkt : Return a cmd to free command pool
2203 */
2204 void
2205 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2206 {
2207 mutex_enter(&instance->cmd_pool_mtx);
2208 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2209
2210
2211 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2212
2213 mutex_exit(&instance->cmd_pool_mtx);
2214 }
2215
2216 void
2217 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2218 {
2219 mutex_enter(&instance->cmd_app_pool_mtx);
2220 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2221
2222 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2223
2224 mutex_exit(&instance->cmd_app_pool_mtx);
2225 }
2226
2227
2228 void
2229 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2230 struct mrsas_cmd *cmd)
2231 {
2232 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2233 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2234 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2235 uint32_t index;
2236 ddi_acc_handle_t acc_handle =
2237 instance->mpi2_frame_pool_dma_obj.acc_handle;
2238
2239 if (!instance->tbolt) {
2240 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2241 return;
2242 }
2243
2244 index = cmd->index;
2245
2246 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2247
2248 if (!ReqDescUnion) {
2249 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2250 return;
2251 }
2252
2253 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2254
2255 ReqDescUnion->Words = 0;
2256
2257 ReqDescUnion->SCSIIO.RequestFlags =
2258 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2259 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2260
2261 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2262
2263 cmd->request_desc = ReqDescUnion;
2264
2265 /* get raid message frame pointer */
2266 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2267
2268 if ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
2269 (instance->device_id == PCI_DEVICE_ID_LSI_FURY)) {
2270 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2271 &scsi_raid_io->SGL.IeeeChain;
2272 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2273 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2274 }
2275
2276 ddi_put8(acc_handle, &scsi_raid_io->Function,
2277 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2278
2279 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2280 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2281
2282 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2283 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2284
2285 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2286 cmd->sense_phys_addr1);
2287
2288
2289 scsi_raid_io_sgl_ieee =
2290 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2291
2292 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2293 (U64)cmd->frame_phys_addr);
2294
2295 ddi_put8(acc_handle,
2296 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2297 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2298 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2299 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2300
2301 con_log(CL_ANN1, (CE_NOTE,
2302 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2303 scsi_raid_io_sgl_ieee->Address));
2304 con_log(CL_ANN1, (CE_NOTE,
2305 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2306 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2307 scsi_raid_io_sgl_ieee->Flags));
2308 }
2309
2310
2311 void
2312 tbolt_complete_cmd(struct mrsas_instance *instance,
2313 struct mrsas_cmd *cmd)
2314 {
2315 uint8_t status;
2316 uint8_t extStatus;
2317 uint8_t arm;
2318 struct scsa_cmd *acmd;
2319 struct scsi_pkt *pkt;
2320 struct scsi_arq_status *arqstat;
2321 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2322 LD_LOAD_BALANCE_INFO *lbinfo;
2323 ddi_acc_handle_t acc_handle =
2324 instance->mpi2_frame_pool_dma_obj.acc_handle;
2325
2326 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2327
2328 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2329 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2330
2331 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2332 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2333
2334 if (status != MFI_STAT_OK) {
2335 con_log(CL_ANN, (CE_WARN,
2336 "IO Cmd Failed SMID %x", cmd->SMID));
2337 } else {
2338 con_log(CL_ANN, (CE_NOTE,
2339 "IO Cmd Success SMID %x", cmd->SMID));
2340 }
2341
2342 /* regular commands */
2343
2344 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2345
2346 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2347 acmd = (struct scsa_cmd *)cmd->cmd;
2348 lbinfo = &instance->load_balance_info[acmd->device_id];
2349
2350 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2351 arm = lbinfo->raid1DevHandle[0] ==
2352 scsi_raid_io->DevHandle ? 0 : 1;
2353
2354 lbinfo->scsi_pending_cmds[arm]--;
2355 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2356 }
2357 con_log(CL_DLEVEL3, (CE_NOTE,
2358 "FastPath IO Completion Success "));
2359 /* FALLTHRU */
2360
2361 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2362 acmd = (struct scsa_cmd *)cmd->cmd;
2363 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2364
2365 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2366 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2367 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2368 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2369 DDI_DMA_SYNC_FORCPU);
2370 }
2371 }
2372
2373 pkt->pkt_reason = CMD_CMPLT;
2374 pkt->pkt_statistics = 0;
2375 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2376 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2377
2378 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2379 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2380 ((acmd->islogical) ? "LD" : "PD"),
2381 acmd->cmd_dmacount, cmd->SMID, status));
2382
2383 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2384 struct scsi_inquiry *inq;
2385
2386 if (acmd->cmd_dmacount != 0) {
2387 bp_mapin(acmd->cmd_buf);
2388 inq = (struct scsi_inquiry *)
2389 acmd->cmd_buf->b_un.b_addr;
2390
2391 /* don't expose physical drives to OS */
2392 if (acmd->islogical &&
2393 (status == MFI_STAT_OK)) {
2394 display_scsi_inquiry((caddr_t)inq);
2395 #ifdef PDSUPPORT
2396 } else if ((status == MFI_STAT_OK) &&
2397 inq->inq_dtype == DTYPE_DIRECT) {
2398 display_scsi_inquiry((caddr_t)inq);
2399 #endif
2400 } else {
2401 /* for physical disk */
2402 status = MFI_STAT_DEVICE_NOT_FOUND;
2403 }
2404 }
2405 }
2406
2407 switch (status) {
2408 case MFI_STAT_OK:
2409 pkt->pkt_scbp[0] = STATUS_GOOD;
2410 break;
2411 case MFI_STAT_LD_CC_IN_PROGRESS:
2412 case MFI_STAT_LD_RECON_IN_PROGRESS:
2413 pkt->pkt_scbp[0] = STATUS_GOOD;
2414 break;
2415 case MFI_STAT_LD_INIT_IN_PROGRESS:
2416 pkt->pkt_reason = CMD_TRAN_ERR;
2417 break;
2418 case MFI_STAT_SCSI_IO_FAILED:
2419 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2420 pkt->pkt_reason = CMD_TRAN_ERR;
2421 break;
2422 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2423 con_log(CL_ANN, (CE_WARN,
2424 "tbolt_complete_cmd: scsi_done with error"));
2425
2426 pkt->pkt_reason = CMD_CMPLT;
2427 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2428
2429 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2430 con_log(CL_ANN,
2431 (CE_WARN, "TEST_UNIT_READY fail"));
2432 } else {
2433 pkt->pkt_state |= STATE_ARQ_DONE;
2434 arqstat = (void *)(pkt->pkt_scbp);
2435 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2436 arqstat->sts_rqpkt_resid = 0;
2437 arqstat->sts_rqpkt_state |=
2438 STATE_GOT_BUS | STATE_GOT_TARGET
2439 | STATE_SENT_CMD
2440 | STATE_XFERRED_DATA;
2441 *(uint8_t *)&arqstat->sts_rqpkt_status =
2442 STATUS_GOOD;
2443 con_log(CL_ANN1,
2444 (CE_NOTE, "Copying Sense data %x",
2445 cmd->SMID));
2446
2447 ddi_rep_get8(acc_handle,
2448 (uint8_t *)&(arqstat->sts_sensedata),
2449 cmd->sense1,
2450 sizeof (struct scsi_extended_sense),
2451 DDI_DEV_AUTOINCR);
2452
2453 }
2454 break;
2455 case MFI_STAT_LD_OFFLINE:
2456 cmn_err(CE_WARN,
2457 "tbolt_complete_cmd: ld offline "
2458 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2459 /* UNDO: */
2460 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2461
2462 ddi_get16(acc_handle,
2463 &scsi_raid_io->RaidContext.ldTargetId),
2464
2465 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2466
2467 pkt->pkt_reason = CMD_DEV_GONE;
2468 pkt->pkt_statistics = STAT_DISCON;
2469 break;
2470 case MFI_STAT_DEVICE_NOT_FOUND:
2471 con_log(CL_ANN, (CE_CONT,
2472 "tbolt_complete_cmd: device not found error"));
2473 pkt->pkt_reason = CMD_DEV_GONE;
2474 pkt->pkt_statistics = STAT_DISCON;
2475 break;
2476
2477 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2478 pkt->pkt_state |= STATE_ARQ_DONE;
2479 pkt->pkt_reason = CMD_CMPLT;
2480 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2481
2482 arqstat = (void *)(pkt->pkt_scbp);
2483 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2484 arqstat->sts_rqpkt_resid = 0;
2485 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2486 | STATE_GOT_TARGET | STATE_SENT_CMD
2487 | STATE_XFERRED_DATA;
2488 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2489
2490 arqstat->sts_sensedata.es_valid = 1;
2491 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2492 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2493
2494 /*
2495 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2496 * ASC: 0x21h; ASCQ: 0x00h;
2497 */
2498 arqstat->sts_sensedata.es_add_code = 0x21;
2499 arqstat->sts_sensedata.es_qual_code = 0x00;
2500 break;
2501 case MFI_STAT_INVALID_CMD:
2502 case MFI_STAT_INVALID_DCMD:
2503 case MFI_STAT_INVALID_PARAMETER:
2504 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2505 default:
2506 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2507 pkt->pkt_reason = CMD_TRAN_ERR;
2508
2509 break;
2510 }
2511
2512 atomic_add_16(&instance->fw_outstanding, (-1));
2513
2514 (void) mrsas_common_check(instance, cmd);
2515 if (acmd->cmd_dmahandle) {
2516 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2517 DDI_SUCCESS) {
2518 ddi_fm_service_impact(instance->dip,
2519 DDI_SERVICE_UNAFFECTED);
2520 pkt->pkt_reason = CMD_TRAN_ERR;
2521 pkt->pkt_statistics = 0;
2522 }
2523 }
2524
2525 /* Call the callback routine */
2526 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2527 (*pkt->pkt_comp)(pkt);
2528
2529 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2530
2531 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2532
2533 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2534
2535 return_raid_msg_pkt(instance, cmd);
2536 break;
2537 }
2538 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2539
2540 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2541 cmd->frame->dcmd.mbox.b[1] == 1) {
2542
2543 mutex_enter(&instance->sync_map_mtx);
2544
2545 con_log(CL_ANN, (CE_NOTE,
2546 "LDMAP sync command SMID RECEIVED 0x%X",
2547 cmd->SMID));
2548 if (cmd->frame->hdr.cmd_status != 0) {
2549 cmn_err(CE_WARN,
2550 "map sync failed, status = 0x%x.",
2551 cmd->frame->hdr.cmd_status);
2552 } else {
2553 instance->map_id++;
2554 cmn_err(CE_NOTE,
2555 "map sync received, switched map_id to %"
2556 PRIu64 " \n", instance->map_id);
2557 }
2558
2559 if (MR_ValidateMapInfo(instance->ld_map[
2560 (instance->map_id & 1)],
2561 instance->load_balance_info)) {
2562 instance->fast_path_io = 1;
2563 } else {
2564 instance->fast_path_io = 0;
2565 }
2566
2567 con_log(CL_ANN, (CE_NOTE,
2568 "instance->fast_path_io %d",
2569 instance->fast_path_io));
2570
2571 instance->unroll.syncCmd = 0;
2572
2573 if (instance->map_update_cmd == cmd) {
2574 return_raid_msg_pkt(instance, cmd);
2575 atomic_add_16(&instance->fw_outstanding, (-1));
2576 (void) mrsas_tbolt_sync_map_info(instance);
2577 }
2578
2579 cmn_err(CE_NOTE, "LDMAP sync completed.");
2580 mutex_exit(&instance->sync_map_mtx);
2581 break;
2582 }
2583
2584 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2585 con_log(CL_ANN1, (CE_CONT,
2586 "AEN command SMID RECEIVED 0x%X",
2587 cmd->SMID));
2588 if ((instance->aen_cmd == cmd) &&
2589 (instance->aen_cmd->abort_aen)) {
2590 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2591 "aborted_aen returned"));
2592 } else {
2593 atomic_add_16(&instance->fw_outstanding, (-1));
2594 service_mfi_aen(instance, cmd);
2595 }
2596 }
2597
2598 if (cmd->sync_cmd == MRSAS_TRUE) {
2599 con_log(CL_ANN1, (CE_CONT,
2600 "Sync-mode Command Response SMID RECEIVED 0x%X",
2601 cmd->SMID));
2602
2603 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2604 } else {
2605 con_log(CL_ANN, (CE_CONT,
2606 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2607 cmd->SMID));
2608 }
2609 break;
2610 default:
2611 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2612 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2613
2614 /* free message */
2615 con_log(CL_ANN,
2616 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2617 break;
2618 }
2619 }
2620
2621 uint_t
2622 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2623 {
2624 uint8_t replyType;
2625 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2626 Mpi2ReplyDescriptorsUnion_t *desc;
2627 uint16_t smid;
2628 union desc_value d_val;
2629 struct mrsas_cmd *cmd;
2630
2631 struct mrsas_header *hdr;
2632 struct scsi_pkt *pkt;
2633
2634 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2635 0, 0, DDI_DMA_SYNC_FORDEV);
2636
2637 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2638 0, 0, DDI_DMA_SYNC_FORCPU);
2639
2640 desc = instance->reply_frame_pool;
2641 desc += instance->reply_read_index;
2642
2643 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2644 replyType = replyDesc->ReplyFlags &
2645 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2646
2647 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2648 return (DDI_INTR_UNCLAIMED);
2649
2650 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2651 != DDI_SUCCESS) {
2652 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2653 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2654 con_log(CL_ANN1,
2655 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2656 "FMA check, returning DDI_INTR_UNCLAIMED"));
2657 return (DDI_INTR_CLAIMED);
2658 }
2659
2660 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2661 (void *)desc, desc->Words));
2662
2663 d_val.word = desc->Words;
2664
2665
2666 /* Read Reply descriptor */
2667 while ((d_val.u1.low != 0xffffffff) &&
2668 (d_val.u1.high != 0xffffffff)) {
2669
2670 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2671 0, 0, DDI_DMA_SYNC_FORCPU);
2672
2673 smid = replyDesc->SMID;
2674
2675 if (!smid || smid > instance->max_fw_cmds + 1) {
2676 con_log(CL_ANN1, (CE_NOTE,
2677 "Reply Desc at Break = %p Words = %" PRIx64,
2678 (void *)desc, desc->Words));
2679 break;
2680 }
2681
2682 cmd = instance->cmd_list[smid - 1];
2683 if (!cmd) {
2684 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2685 "outstanding_cmd: Invalid command "
2686 " or Poll commad Received in completion path"));
2687 } else {
2688 mutex_enter(&instance->cmd_pend_mtx);
2689 if (cmd->sync_cmd == MRSAS_TRUE) {
2690 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2691 if (hdr) {
2692 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2693 "tbolt_process_outstanding_cmd:"
2694 " mlist_del_init(&cmd->list)."));
2695 mlist_del_init(&cmd->list);
2696 }
2697 } else {
2698 pkt = cmd->pkt;
2699 if (pkt) {
2700 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2701 "tbolt_process_outstanding_cmd:"
2702 "mlist_del_init(&cmd->list)."));
2703 mlist_del_init(&cmd->list);
2704 }
2705 }
2706
2707 mutex_exit(&instance->cmd_pend_mtx);
2708
2709 tbolt_complete_cmd(instance, cmd);
2710 }
2711 /* set it back to all 1s. */
2712 desc->Words = -1LL;
2713
2714 instance->reply_read_index++;
2715
2716 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2717 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2718 instance->reply_read_index = 0;
2719 }
2720
2721 /* Get the next reply descriptor */
2722 if (!instance->reply_read_index)
2723 desc = instance->reply_frame_pool;
2724 else
2725 desc++;
2726
2727 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2728
2729 d_val.word = desc->Words;
2730
2731 con_log(CL_ANN1, (CE_NOTE,
2732 "Next Reply Desc = %p Words = %" PRIx64,
2733 (void *)desc, desc->Words));
2734
2735 replyType = replyDesc->ReplyFlags &
2736 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2737
2738 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2739 break;
2740
2741 } /* End of while loop. */
2742
2743 /* update replyIndex to FW */
2744 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2745
2746
2747 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2748 0, 0, DDI_DMA_SYNC_FORDEV);
2749
2750 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2751 0, 0, DDI_DMA_SYNC_FORCPU);
2752 return (DDI_INTR_CLAIMED);
2753 }
2754
2755
2756
2757
2758 /*
2759 * complete_cmd_in_sync_mode - Completes an internal command
2760 * @instance: Adapter soft state
2761 * @cmd: Command to be completed
2762 *
2763 * The issue_cmd_in_sync_mode() function waits for a command to complete
2764 * after it issues a command. This function wakes up that waiting routine by
2765 * calling wake_up() on the wait queue.
2766 */
2767 void
2768 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2769 struct mrsas_cmd *cmd)
2770 {
2771
2772 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2773 &cmd->frame->io.cmd_status);
2774
2775 cmd->sync_cmd = MRSAS_FALSE;
2776
2777 mutex_enter(&instance->int_cmd_mtx);
2778 if (cmd->cmd_status == ENODATA) {
2779 cmd->cmd_status = 0;
2780 }
2781 cv_broadcast(&instance->int_cmd_cv);
2782 mutex_exit(&instance->int_cmd_mtx);
2783
2784 }
2785
2786 /*
2787 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2788 * instance: Adapter soft state
2789 *
2790 * Issues an internal command (DCMD) to get the FW's controller PD
2791 * list structure. This information is mainly used to find out SYSTEM
2792 * supported by the FW.
2793 */
2794 int
2795 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2796 {
2797 int ret = 0;
2798 struct mrsas_cmd *cmd = NULL;
2799 struct mrsas_dcmd_frame *dcmd;
2800 MR_FW_RAID_MAP_ALL *ci;
2801 uint32_t ci_h = 0;
2802 U32 size_map_info;
2803
2804 cmd = get_raid_msg_pkt(instance);
2805
2806 if (cmd == NULL) {
2807 cmn_err(CE_WARN,
2808 "Failed to get a cmd from free-pool in get_ld_map_info()");
2809 return (DDI_FAILURE);
2810 }
2811
2812 dcmd = &cmd->frame->dcmd;
2813
2814 size_map_info = sizeof (MR_FW_RAID_MAP) +
2815 (sizeof (MR_LD_SPAN_MAP) *
2816 (MAX_LOGICAL_DRIVES - 1));
2817
2818 con_log(CL_ANN, (CE_NOTE,
2819 "size_map_info : 0x%x", size_map_info));
2820
2821 ci = instance->ld_map[(instance->map_id & 1)];
2822 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2823
2824 if (!ci) {
2825 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2826 return_raid_msg_pkt(instance, cmd);
2827 return (-1);
2828 }
2829
2830 bzero(ci, sizeof (*ci));
2831 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2832
2833 dcmd->cmd = MFI_CMD_OP_DCMD;
2834 dcmd->cmd_status = 0xFF;
2835 dcmd->sge_count = 1;
2836 dcmd->flags = MFI_FRAME_DIR_READ;
2837 dcmd->timeout = 0;
2838 dcmd->pad_0 = 0;
2839 dcmd->data_xfer_len = size_map_info;
2840 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2841 dcmd->sgl.sge32[0].phys_addr = ci_h;
2842 dcmd->sgl.sge32[0].length = size_map_info;
2843
2844
2845 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2846
2847 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2848 ret = 0;
2849 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2850 } else {
2851 cmn_err(CE_WARN, "Get LD Map Info failed");
2852 ret = -1;
2853 }
2854
2855 return_raid_msg_pkt(instance, cmd);
2856
2857 return (ret);
2858 }
2859
2860 void
2861 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2862 {
2863 uint32_t i;
2864 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2865 union desc_value d_val;
2866
2867 reply_desc = instance->reply_frame_pool;
2868
2869 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2870 d_val.word = reply_desc->Words;
2871 con_log(CL_DLEVEL3, (CE_NOTE,
2872 "i=%d, %x:%x",
2873 i, d_val.u1.high, d_val.u1.low));
2874 }
2875 }
2876
2877 /*
2878 * mrsas_tbolt_command_create - Create command for fast path.
2879 * @io_info: MegaRAID IO request packet pointer.
2880 * @ref_tag: Reference tag for RD/WRPROTECT
2881 *
2882 * Create the command for fast path.
2883 */
2884 void
2885 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2886 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2887 U32 ref_tag)
2888 {
2889 uint16_t EEDPFlags;
2890 uint32_t Control;
2891 ddi_acc_handle_t acc_handle =
2892 instance->mpi2_frame_pool_dma_obj.acc_handle;
2893
2894 /* Prepare 32-byte CDB if DIF is supported on this device */
2895 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2896
2897 bzero(cdb, 32);
2898
2899 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2900
2901
2902 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2903
2904 if (io_info->isRead)
2905 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2906 else
2907 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2908
2909 /* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2910 cdb[10] = MRSAS_RD_WR_PROTECT;
2911
2912 /* LOGICAL BLOCK ADDRESS */
2913 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2914 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2915 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2916 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2917 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2918 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2919 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2920 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2921
2922 /* Logical block reference tag */
2923 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2924 BE_32(ref_tag));
2925
2926 ddi_put16(acc_handle,
2927 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2928
2929 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2930 ((io_info->numBlocks)*512));
2931 /* Specify 32-byte cdb */
2932 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2933
2934 /* Transfer length */
2935 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2936 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2937 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2938 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2939
2940 /* set SCSI IO EEDPFlags */
2941 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2942 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2943
2944 /* set SCSI IO EEDPFlags bits */
2945 if (io_info->isRead) {
2946 /*
2947 * For READ commands, the EEDPFlags shall be set to specify to
2948 * Increment the Primary Reference Tag, to Check the Reference
2949 * Tag, and to Check and Remove the Protection Information
2950 * fields.
2951 */
2952 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2953 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2954 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2955 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2956 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2957 } else {
2958 /*
2959 * For WRITE commands, the EEDPFlags shall be set to specify to
2960 * Increment the Primary Reference Tag, and to Insert
2961 * Protection Information fields.
2962 */
2963 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2964 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2965 }
2966 Control |= (0x4 << 26);
2967
2968 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2969 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2970 ddi_put32(acc_handle,
2971 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2972 }
2973
2974
2975 /*
2976 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2977 * @cdb: CDB
2978 * @cdb_len: cdb length
2979 * @start_blk: Start block of IO
2980 *
2981 * Used to set the PD LBA in CDB for FP IOs
2982 */
2983 static void
2984 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2985 U32 num_blocks)
2986 {
2987 U8 cdb_len = *cdb_len_ptr;
2988 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2989
2990 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
2991 if (((cdb_len == 12) || (cdb_len == 16)) &&
2992 (start_blk <= 0xffffffff)) {
2993 if (cdb_len == 16) {
2994 con_log(CL_ANN,
2995 (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2996 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2997 flagvals = cdb[1];
2998 groupnum = cdb[14];
2999 control = cdb[15];
3000 } else {
3001 con_log(CL_ANN,
3002 (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3003 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3004 flagvals = cdb[1];
3005 groupnum = cdb[10];
3006 control = cdb[11];
3007 }
3008
3009 bzero(cdb, sizeof (cdb));
3010
3011 cdb[0] = opcode;
3012 cdb[1] = flagvals;
3013 cdb[6] = groupnum;
3014 cdb[9] = control;
3015 /* Set transfer length */
3016 cdb[8] = (U8)(num_blocks & 0xff);
3017 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3018 cdb_len = 10;
3019 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3020 /* Convert to 16 byte CDB for large LBA's */
3021 con_log(CL_ANN,
3022 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3023 switch (cdb_len) {
3024 case 6:
3025 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3026 control = cdb[5];
3027 break;
3028 case 10:
3029 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3030 flagvals = cdb[1];
3031 groupnum = cdb[6];
3032 control = cdb[9];
3033 break;
3034 case 12:
3035 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3036 flagvals = cdb[1];
3037 groupnum = cdb[10];
3038 control = cdb[11];
3039 break;
3040 }
3041
3042 bzero(cdb, sizeof (cdb));
3043
3044 cdb[0] = opcode;
3045 cdb[1] = flagvals;
3046 cdb[14] = groupnum;
3047 cdb[15] = control;
3048
3049 /* Transfer length */
3050 cdb[13] = (U8)(num_blocks & 0xff);
3051 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3052 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3053 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3054
3055 /* Specify 16-byte cdb */
3056 cdb_len = 16;
3057 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3058 /* convert to 10 byte CDB */
3059 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3060 control = cdb[5];
3061
3062 bzero(cdb, sizeof (cdb));
3063 cdb[0] = opcode;
3064 cdb[9] = control;
3065
3066 /* Set transfer length */
3067 cdb[8] = (U8)(num_blocks & 0xff);
3068 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3069
3070 /* Specify 10-byte cdb */
3071 cdb_len = 10;
3072 }
3073
3074
3075 /* Fall through Normal case, just load LBA here */
3076 switch (cdb_len) {
3077 case 6:
3078 {
3079 U8 val = cdb[1] & 0xE0;
3080 cdb[3] = (U8)(start_blk & 0xff);
3081 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3082 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3083 break;
3084 }
3085 case 10:
3086 cdb[5] = (U8)(start_blk & 0xff);
3087 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3088 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3089 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3090 break;
3091 case 12:
3092 cdb[5] = (U8)(start_blk & 0xff);
3093 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3094 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3095 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3096 break;
3097
3098 case 16:
3099 cdb[9] = (U8)(start_blk & 0xff);
3100 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3101 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3102 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3103 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3104 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3105 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3106 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3107 break;
3108 }
3109
3110 *cdb_len_ptr = cdb_len;
3111 }
3112
3113
3114 static int
3115 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3116 {
3117 MR_FW_RAID_MAP_ALL *ld_map;
3118
3119 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3120
3121 ld_map = instance->ld_map[(instance->map_id & 1)];
3122
3123 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3124 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3125
3126 if (MR_ValidateMapInfo(instance->ld_map[
3127 (instance->map_id & 1)], instance->load_balance_info)) {
3128 con_log(CL_ANN,
3129 (CE_CONT, "MR_ValidateMapInfo success"));
3130
3131 instance->fast_path_io = 1;
3132 con_log(CL_ANN,
3133 (CE_NOTE, "instance->fast_path_io %d",
3134 instance->fast_path_io));
3135
3136 return (DDI_SUCCESS);
3137 }
3138
3139 }
3140
3141 instance->fast_path_io = 0;
3142 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3143 con_log(CL_ANN, (CE_NOTE,
3144 "instance->fast_path_io %d", instance->fast_path_io));
3145
3146 return (DDI_FAILURE);
3147 }
3148
3149 /*
3150 * Marks HBA as bad. This will be called either when an
3151 * IO packet times out even after 3 FW resets
3152 * or FW is found to be fault even after 3 continuous resets.
3153 */
3154
3155 void
3156 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3157 {
3158 cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3159
3160 if (instance->deadadapter == 1)
3161 return;
3162
3163 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3164 "Writing to doorbell with MFI_STOP_ADP "));
3165 mutex_enter(&instance->ocr_flags_mtx);
3166 instance->deadadapter = 1;
3167 mutex_exit(&instance->ocr_flags_mtx);
3168 instance->func_ptr->disable_intr(instance);
3169 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3170 /* Flush */
3171 (void) RD_RESERVED0_REGISTER(instance);
3172
3173 (void) mrsas_print_pending_cmds(instance);
3174 (void) mrsas_complete_pending_cmds(instance);
3175 }
3176
3177 void
3178 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3179 {
3180 int i;
3181 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3182 instance->reply_read_index = 0;
3183
3184 /* initializing reply address to 0xFFFFFFFF */
3185 reply_desc = instance->reply_frame_pool;
3186
3187 for (i = 0; i < instance->reply_q_depth; i++) {
3188 reply_desc->Words = (uint64_t)~0;
3189 reply_desc++;
3190 }
3191 }
3192
3193 int
3194 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3195 {
3196 uint32_t status = 0x00;
3197 uint32_t retry = 0;
3198 uint32_t cur_abs_reg_val;
3199 uint32_t fw_state;
3200 uint32_t abs_state;
3201 uint32_t i;
3202
3203 con_log(CL_ANN, (CE_NOTE,
3204 "mrsas_tbolt_reset_ppc entered"));
3205
3206 if (instance->deadadapter == 1) {
3207 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3208 "no more resets as HBA has been marked dead ");
3209 return (DDI_FAILURE);
3210 }
3211
3212 mutex_enter(&instance->ocr_flags_mtx);
3213 instance->adapterresetinprogress = 1;
3214 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
3215 "adpterresetinprogress flag set, time %llx", gethrtime()));
3216 mutex_exit(&instance->ocr_flags_mtx);
3217
3218 instance->func_ptr->disable_intr(instance);
3219
3220 /* Add delay inorder to complete the ioctl & io cmds in-flight */
3221 for (i = 0; i < 3000; i++) {
3222 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3223 }
3224
3225 instance->reply_read_index = 0;
3226
3227 retry_reset:
3228 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3229 ":Resetting TBOLT "));
3230
3231 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3232 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3233 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3234 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3235 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3236 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3237 con_log(CL_ANN1, (CE_NOTE,
3238 "mrsas_tbolt_reset_ppc: magic number written "
3239 "to write sequence register"));
3240 delay(100 * drv_usectohz(MILLISEC));
3241 status = RD_TBOLT_HOST_DIAG(instance);
3242 con_log(CL_ANN1, (CE_NOTE,
3243 "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3244 "to write sequence register"));
3245
3246 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3247 delay(100 * drv_usectohz(MILLISEC));
3248 status = RD_TBOLT_HOST_DIAG(instance);
3249 if (retry++ == 100) {
3250 cmn_err(CE_WARN,
3251 "mrsas_tbolt_reset_ppc:"
3252 "resetadapter bit is set already "
3253 "check retry count %d", retry);
3254 return (DDI_FAILURE);
3255 }
3256 }
3257
3258 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3259 delay(100 * drv_usectohz(MILLISEC));
3260
3261 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3262 (uint8_t *)((uintptr_t)(instance)->regmap +
3263 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3264
3265 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3266 delay(100 * drv_usectohz(MILLISEC));
3267 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3268 (uint8_t *)((uintptr_t)(instance)->regmap +
3269 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3270 if (retry++ == 100) {
3271 /* Dont call kill adapter here */
3272 /* RESET BIT ADAPTER is cleared by firmare */
3273 /* mrsas_tbolt_kill_adapter(instance); */
3274 cmn_err(CE_WARN,
3275 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3276 instance->instance, __func__);
3277 return (DDI_FAILURE);
3278 }
3279 }
3280
3281 con_log(CL_ANN,
3282 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3283 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3284 "Calling mfi_state_transition_to_ready"));
3285
3286 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3287 retry = 0;
3288 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3289 delay(100 * drv_usectohz(MILLISEC));
3290 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3291 }
3292 if (abs_state <= MFI_STATE_FW_INIT) {
3293 cmn_err(CE_WARN,
3294 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3295 "state = 0x%x, RETRY RESET.", abs_state);
3296 goto retry_reset;
3297 }
3298
3299 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3300 if (mfi_state_transition_to_ready(instance) ||
3301 debug_tbolt_fw_faults_after_ocr_g == 1) {
3302 cur_abs_reg_val =
3303 instance->func_ptr->read_fw_status_reg(instance);
3304 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3305
3306 con_log(CL_ANN1, (CE_NOTE,
3307 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3308 "FW state = 0x%x", fw_state));
3309 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3310 fw_state = MFI_STATE_FAULT;
3311
3312 con_log(CL_ANN,
3313 (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3314 "FW state = 0x%x", fw_state));
3315
3316 if (fw_state == MFI_STATE_FAULT) {
3317 /* increment the count */
3318 instance->fw_fault_count_after_ocr++;
3319 if (instance->fw_fault_count_after_ocr
3320 < MAX_FW_RESET_COUNT) {
3321 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3322 "FW is in fault after OCR count %d "
3323 "Retry Reset",
3324 instance->fw_fault_count_after_ocr);
3325 goto retry_reset;
3326
3327 } else {
3328 cmn_err(CE_WARN, "mrsas %d: %s:"
3329 "Max Reset Count exceeded >%d"
3330 "Mark HBA as bad, KILL adapter",
3331 instance->instance, __func__,
3332 MAX_FW_RESET_COUNT);
3333
3334 mrsas_tbolt_kill_adapter(instance);
3335 return (DDI_FAILURE);
3336 }
3337 }
3338 }
3339
3340 /* reset the counter as FW is up after OCR */
3341 instance->fw_fault_count_after_ocr = 0;
3342
3343 mrsas_reset_reply_desc(instance);
3344
3345
3346 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3347 "Calling mrsas_issue_init_mpi2"));
3348 abs_state = mrsas_issue_init_mpi2(instance);
3349 if (abs_state == (uint32_t)DDI_FAILURE) {
3350 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3351 "INIT failed Retrying Reset");
3352 goto retry_reset;
3353 }
3354 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3355 "mrsas_issue_init_mpi2 Done"));
3356
3357 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3358 "Calling mrsas_print_pending_cmd"));
3359 (void) mrsas_print_pending_cmds(instance);
3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3361 "mrsas_print_pending_cmd done"));
3362
3363 instance->func_ptr->enable_intr(instance);
3364 instance->fw_outstanding = 0;
3365
3366 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3367 "Calling mrsas_issue_pending_cmds"));
3368 (void) mrsas_issue_pending_cmds(instance);
3369 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3370 "issue_pending_cmds done."));
3371
3372 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3373 "Calling aen registration"));
3374
3375 instance->aen_cmd->retry_count_for_ocr = 0;
3376 instance->aen_cmd->drv_pkt_time = 0;
3377
3378 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3379
3380 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3381 mutex_enter(&instance->ocr_flags_mtx);
3382 instance->adapterresetinprogress = 0;
3383 mutex_exit(&instance->ocr_flags_mtx);
3384 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3385 "adpterresetinprogress flag unset"));
3386
3387 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3388 return (DDI_SUCCESS);
3389
3390 }
3391
3392
3393 /*
3394 * mrsas_sync_map_info - Returns FW's ld_map structure
3395 * @instance: Adapter soft state
3396 *
3397 * Issues an internal command (DCMD) to get the FW's controller PD
3398 * list structure. This information is mainly used to find out SYSTEM
3399 * supported by the FW.
3400 */
3401
3402 static int
3403 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3404 {
3405 int ret = 0, i;
3406 struct mrsas_cmd *cmd = NULL;
3407 struct mrsas_dcmd_frame *dcmd;
3408 uint32_t size_sync_info, num_lds;
3409 LD_TARGET_SYNC *ci = NULL;
3410 MR_FW_RAID_MAP_ALL *map;
3411 MR_LD_RAID *raid;
3412 LD_TARGET_SYNC *ld_sync;
3413 uint32_t ci_h = 0;
3414 uint32_t size_map_info;
3415
3416 cmd = get_raid_msg_pkt(instance);
3417
3418 if (cmd == NULL) {
3419 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3420 "mrsas_tbolt_sync_map_info(). ");
3421 return (DDI_FAILURE);
3422 }
3423
3424 /* Clear the frame buffer and assign back the context id */
3425 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3426 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3427 cmd->index);
3428 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3429
3430
3431 map = instance->ld_map[instance->map_id & 1];
3432
3433 num_lds = map->raidMap.ldCount;
3434
3435 dcmd = &cmd->frame->dcmd;
3436
3437 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3438
3439 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3440 size_sync_info, num_lds));
3441
3442 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3443
3444 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3445 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3446
3447 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3448
3449 ld_sync = (LD_TARGET_SYNC *)ci;
3450
3451 for (i = 0; i < num_lds; i++, ld_sync++) {
3452 raid = MR_LdRaidGet(i, map);
3453
3454 con_log(CL_ANN1,
3455 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3456 i, raid->seqNum, raid->flags.ldSyncRequired));
3457
3458 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3459
3460 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3461 i, ld_sync->ldTargetId));
3462
3463 ld_sync->seqNum = raid->seqNum;
3464 }
3465
3466
3467 size_map_info = sizeof (MR_FW_RAID_MAP) +
3468 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3469
3470 dcmd->cmd = MFI_CMD_OP_DCMD;
3471 dcmd->cmd_status = 0xFF;
3472 dcmd->sge_count = 1;
3473 dcmd->flags = MFI_FRAME_DIR_WRITE;
3474 dcmd->timeout = 0;
3475 dcmd->pad_0 = 0;
3476 dcmd->data_xfer_len = size_map_info;
3477 ASSERT(num_lds <= 255);
3478 dcmd->mbox.b[0] = (U8)num_lds;
3479 dcmd->mbox.b[1] = 1; /* Pend */
3480 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3481 dcmd->sgl.sge32[0].phys_addr = ci_h;
3482 dcmd->sgl.sge32[0].length = size_map_info;
3483
3484
3485 instance->map_update_cmd = cmd;
3486 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3487
3488 instance->func_ptr->issue_cmd(cmd, instance);
3489
3490 instance->unroll.syncCmd = 1;
3491 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3492
3493 return (ret);
3494 }
3495
3496 /*
3497 * abort_syncmap_cmd
3498 */
3499 int
3500 abort_syncmap_cmd(struct mrsas_instance *instance,
3501 struct mrsas_cmd *cmd_to_abort)
3502 {
3503 int ret = 0;
3504
3505 struct mrsas_cmd *cmd;
3506 struct mrsas_abort_frame *abort_fr;
3507
3508 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3509
3510 cmd = get_raid_msg_mfi_pkt(instance);
3511
3512 if (!cmd) {
3513 cmn_err(CE_WARN,
3514 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3515 return (DDI_FAILURE);
3516 }
3517 /* Clear the frame buffer and assign back the context id */
3518 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3519 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3520 cmd->index);
3521
3522 abort_fr = &cmd->frame->abort;
3523
3524 /* prepare and issue the abort frame */
3525 ddi_put8(cmd->frame_dma_obj.acc_handle,
3526 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3527 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3528 MFI_CMD_STATUS_SYNC_MODE);
3529 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3530 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3531 cmd_to_abort->index);
3532 ddi_put32(cmd->frame_dma_obj.acc_handle,
3533 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3534 ddi_put32(cmd->frame_dma_obj.acc_handle,
3535 &abort_fr->abort_mfi_phys_addr_hi, 0);
3536
3537 cmd->frame_count = 1;
3538
3539 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3540
3541 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3542 con_log(CL_ANN1, (CE_WARN,
3543 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3544 ret = -1;
3545 } else {
3546 ret = 0;
3547 }
3548
3549 return_raid_msg_mfi_pkt(instance, cmd);
3550
3551 atomic_add_16(&instance->fw_outstanding, (-1));
3552
3553 return (ret);
3554 }
3555
3556
3557 #ifdef PDSUPPORT
3558 /*
3559 * Even though these functions were originally intended for 2208 only, it
3560 * turns out they're useful for "Skinny" support as well. In a perfect world,
3561 * these two functions would be either in mr_sas.c, or in their own new source
3562 * file. Since this driver needs some cleanup anyway, keep this portion in
3563 * mind as well.
3564 */
3565
3566 int
3567 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3568 uint8_t lun, dev_info_t **ldip)
3569 {
3570 struct scsi_device *sd;
3571 dev_info_t *child;
3572 int rval, dtype;
3573 struct mrsas_tbolt_pd_info *pds = NULL;
3574
3575 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3576 tgt, lun));
3577
3578 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3579 if (ldip) {
3580 *ldip = child;
3581 }
3582 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3583 rval = mrsas_service_evt(instance, tgt, 1,
3584 MRSAS_EVT_UNCONFIG_TGT, NULL);
3585 con_log(CL_ANN1, (CE_WARN,
3586 "mr_sas:DELETING STALE ENTRY rval = %d "
3587 "tgt id = %d", rval, tgt));
3588 return (NDI_FAILURE);
3589 }
3590 return (NDI_SUCCESS);
3591 }
3592
3593 pds = (struct mrsas_tbolt_pd_info *)
3594 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3595 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3596 dtype = pds->scsiDevType;
3597
3598 /* Check for Disk */
3599 if ((dtype == DTYPE_DIRECT)) {
3600 if ((dtype == DTYPE_DIRECT) &&
3601 (LE_16(pds->fwState) != PD_SYSTEM)) {
3602 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3603 return (NDI_FAILURE);
3604 }
3605 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3606 sd->sd_address.a_hba_tran = instance->tran;
3607 sd->sd_address.a_target = (uint16_t)tgt;
3608 sd->sd_address.a_lun = (uint8_t)lun;
3609
3610 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3611 rval = mrsas_config_scsi_device(instance, sd, ldip);
3612 con_log(CL_DLEVEL1, (CE_NOTE,
3613 "Phys. device found: tgt %d dtype %d: %s",
3614 tgt, dtype, sd->sd_inq->inq_vid));
3615 } else {
3616 rval = NDI_FAILURE;
3617 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3618 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3619 tgt, dtype, sd->sd_inq->inq_vid));
3620 }
3621
3622 /* sd_unprobe is blank now. Free buffer manually */
3623 if (sd->sd_inq) {
3624 kmem_free(sd->sd_inq, SUN_INQSIZE);
3625 sd->sd_inq = (struct scsi_inquiry *)NULL;
3626 }
3627 kmem_free(sd, sizeof (struct scsi_device));
3628 } else {
3629 con_log(CL_ANN1, (CE_NOTE,
3630 "Device not supported: tgt %d lun %d dtype %d",
3631 tgt, lun, dtype));
3632 rval = NDI_FAILURE;
3633 }
3634
3635 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3636 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3637 rval));
3638 return (rval);
3639 }
3640
3641 static void
3642 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3643 struct mrsas_tbolt_pd_info *pds, int tgt)
3644 {
3645 struct mrsas_cmd *cmd;
3646 struct mrsas_dcmd_frame *dcmd;
3647 dma_obj_t dcmd_dma_obj;
3648
3649 ASSERT(instance->tbolt || instance->skinny);
3650
3651 if (instance->tbolt)
3652 cmd = get_raid_msg_pkt(instance);
3653 else
3654 cmd = mrsas_get_mfi_pkt(instance);
3655
3656 if (!cmd) {
3657 con_log(CL_ANN1,
3658 (CE_WARN, "Failed to get a cmd for get pd info"));
3659 return;
3660 }
3661
3662 /* Clear the frame buffer and assign back the context id */
3663 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3664 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3665 cmd->index);
3666
3667
3668 dcmd = &cmd->frame->dcmd;
3669 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3670 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3671 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3672 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3673 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3674 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3675
3676 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3677 DDI_STRUCTURE_LE_ACC);
3678 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3679 bzero(dcmd->mbox.b, 12);
3680 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3681 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3682 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3683 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3684 MFI_FRAME_DIR_READ);
3685 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3686 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3687 sizeof (struct mrsas_tbolt_pd_info));
3688 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3689 MR_DCMD_PD_GET_INFO);
3690 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3691 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3692 sizeof (struct mrsas_tbolt_pd_info));
3693 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3694 dcmd_dma_obj.dma_cookie[0].dmac_address);
3695
3696 cmd->sync_cmd = MRSAS_TRUE;
3697 cmd->frame_count = 1;
3698
3699 if (instance->tbolt)
3700 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3701
3702 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3703
3704 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3705 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3706 DDI_DEV_AUTOINCR);
3707 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3708
3709 if (instance->tbolt)
3710 return_raid_msg_pkt(instance, cmd);
3711 else
3712 mrsas_return_mfi_pkt(instance, cmd);
3713 }
3714 #endif