5719 Add support for LSI Fury adapters
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 3 * i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18 /*
19 19 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
20 + * Copyright 2015 Garrett D'Amore <garrett@damore.org>
20 21 */
21 22
22 23
23 24 #include <sys/types.h>
24 25 #include <sys/file.h>
25 26 #include <sys/atomic.h>
26 27 #include <sys/scsi/scsi.h>
27 28 #include <sys/byteorder.h>
28 29 #include "ld_pd_map.h"
29 30 #include "mr_sas.h"
30 31 #include "fusion.h"
31 32
32 33 /*
33 34 * FMA header files
34 35 */
35 36 #include <sys/ddifm.h>
36 37 #include <sys/fm/protocol.h>
37 38 #include <sys/fm/util.h>
38 39 #include <sys/fm/io/ddi.h>
39 40
40 41
41 42 /* Pre-TB command size and TB command size. */
42 43 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
43 44 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
44 45 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
45 46 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
46 47 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
47 48 extern ddi_dma_attr_t mrsas_generic_dma_attr;
48 49 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
49 50 extern struct ddi_device_acc_attr endian_attr;
50 51 extern int debug_level_g;
51 52 extern unsigned int enable_fp;
52 53 volatile int dump_io_wait_time = 90;
53 54 extern volatile int debug_timeout_g;
54 55 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
55 56 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
56 57 extern void push_pending_mfi_pkt(struct mrsas_instance *,
57 58 struct mrsas_cmd *);
58 59 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
59 60 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
60 61
61 62 /* Local static prototypes. */
62 63 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
63 64 struct scsi_address *, struct scsi_pkt *, uchar_t *);
64 65 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
65 66 U64 start_blk, U32 num_blocks);
66 67 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
67 68 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
68 69 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
69 70 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
70 71 #ifdef PDSUPPORT
71 72 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
72 73 struct mrsas_tbolt_pd_info *, int);
73 74 #endif /* PDSUPPORT */
74 75
75 76 static int debug_tbolt_fw_faults_after_ocr_g = 0;
76 77
77 78 /*
78 79 * destroy_mfi_mpi_frame_pool
79 80 */
80 81 void
81 82 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
82 83 {
83 84 int i;
84 85
85 86 struct mrsas_cmd *cmd;
86 87
87 88 /* return all mfi frames to pool */
88 89 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
89 90 cmd = instance->cmd_list[i];
90 91 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
91 92 (void) mrsas_free_dma_obj(instance,
92 93 cmd->frame_dma_obj);
93 94 }
94 95 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
95 96 }
96 97 }
97 98
98 99 /*
99 100 * destroy_mpi2_frame_pool
100 101 */
101 102 void
102 103 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
103 104 {
104 105
105 106 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
106 107 (void) mrsas_free_dma_obj(instance,
107 108 instance->mpi2_frame_pool_dma_obj);
108 109 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
109 110 }
110 111 }
111 112
112 113
113 114 /*
114 115 * mrsas_tbolt_free_additional_dma_buffer
115 116 */
116 117 void
117 118 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
118 119 {
119 120 int i;
120 121
121 122 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
122 123 (void) mrsas_free_dma_obj(instance,
123 124 instance->mfi_internal_dma_obj);
124 125 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
125 126 }
126 127 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
127 128 (void) mrsas_free_dma_obj(instance,
128 129 instance->mfi_evt_detail_obj);
129 130 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
130 131 }
131 132
132 133 for (i = 0; i < 2; i++) {
133 134 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
134 135 (void) mrsas_free_dma_obj(instance,
135 136 instance->ld_map_obj[i]);
136 137 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
137 138 }
138 139 }
139 140 }
140 141
141 142
142 143 /*
143 144 * free_req_rep_desc_pool
144 145 */
145 146 void
146 147 free_req_rep_desc_pool(struct mrsas_instance *instance)
147 148 {
148 149 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
149 150 (void) mrsas_free_dma_obj(instance,
150 151 instance->request_desc_dma_obj);
151 152 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
152 153 }
153 154
154 155 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
155 156 (void) mrsas_free_dma_obj(instance,
156 157 instance->reply_desc_dma_obj);
157 158 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
158 159 }
159 160
160 161
161 162 }
162 163
163 164
164 165 /*
165 166 * ThunderBolt(TB) Request Message Frame Pool
166 167 */
167 168 int
168 169 create_mpi2_frame_pool(struct mrsas_instance *instance)
169 170 {
170 171 int i = 0;
171 172 uint16_t max_cmd;
172 173 uint32_t sgl_sz;
173 174 uint32_t raid_msg_size;
174 175 uint32_t total_size;
175 176 uint32_t offset;
176 177 uint32_t io_req_base_phys;
177 178 uint8_t *io_req_base;
178 179 struct mrsas_cmd *cmd;
179 180
180 181 max_cmd = instance->max_fw_cmds;
181 182
182 183 sgl_sz = 1024;
183 184 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
184 185
185 186 /* Allocating an additional 256 bytes to accommodate SMID 0. */
186 187 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
187 188 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
188 189
189 190 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
190 191 "max_cmd %x", max_cmd));
191 192
192 193 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
193 194 "request message frame pool size %x", total_size));
194 195
195 196 /*
196 197 * ThunderBolt(TB): we need to create a single chunk of DMA'ble memory
197 198 * and then split the memory among up to 1024 commands. Each command must
198 199 * be able to contain a RAID MESSAGE FRAME, which will embed an MFI_FRAME
199 200 * within it. See also the "alloc_req_rep_desc" function, where the
200 201 * request/reply descriptor queues are allocated.
201 202 */
202 203
203 204 instance->mpi2_frame_pool_dma_obj.size = total_size;
204 205 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
205 206 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
206 207 0xFFFFFFFFU;
207 208 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
208 209 0xFFFFFFFFU;
209 210 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
210 211 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
211 212
212 213 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
213 214 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
214 215 cmn_err(CE_WARN,
215 216 "mr_sas: could not alloc mpi2 frame pool");
216 217 return (DDI_FAILURE);
217 218 }
218 219
219 220 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
220 221 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
221 222
222 223 instance->io_request_frames =
223 224 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
224 225 instance->io_request_frames_phy =
225 226 (uint32_t)
226 227 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
227 228
228 229 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
229 230 (void *)instance->io_request_frames));
230 231
231 232 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
232 233 instance->io_request_frames_phy));
233 234
234 235 io_req_base = (uint8_t *)instance->io_request_frames +
235 236 MRSAS_THUNDERBOLT_MSG_SIZE;
236 237 io_req_base_phys = instance->io_request_frames_phy +
237 238 MRSAS_THUNDERBOLT_MSG_SIZE;
238 239
239 240 con_log(CL_DLEVEL3, (CE_NOTE,
240 241 "io req_base_phys 0x%x", io_req_base_phys));
241 242
242 243 for (i = 0; i < max_cmd; i++) {
243 244 cmd = instance->cmd_list[i];
244 245
245 246 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
246 247
247 248 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
248 249 ((uint8_t *)io_req_base + offset);
249 250 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
250 251
251 252 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
252 253 (max_cmd * raid_msg_size) + i * sgl_sz);
253 254
254 255 cmd->sgl_phys_addr = (io_req_base_phys +
255 256 (max_cmd * raid_msg_size) + i * sgl_sz);
256 257
257 258 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
258 259 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
259 260 (i * SENSE_LENGTH));
260 261
261 262 cmd->sense_phys_addr1 = (io_req_base_phys +
262 263 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
263 264 (i * SENSE_LENGTH));
264 265
265 266
266 267 cmd->SMID = i + 1;
267 268
268 269 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
269 270 cmd->index, (void *)cmd->scsi_io_request));
270 271
271 272 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
272 273 cmd->index, cmd->scsi_io_request_phys_addr));
273 274
274 275 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
275 276 cmd->index, (void *)cmd->sense1));
276 277
277 278 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
278 279 cmd->index, cmd->sense_phys_addr1));
279 280
280 281 con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers [%x]0x%p",
281 282 cmd->index, (void *)cmd->sgl));
282 283
283 284 con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers phys [%x]0x%x",
284 285 cmd->index, cmd->sgl_phys_addr));
285 286 }
286 287
287 288 return (DDI_SUCCESS);
288 289
289 290 }
290 291
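Editorial aside (not part of the webrev): the slice arithmetic above is easy to lose among the DDI calls. The stand-alone sketch below recomputes the offsets that create_mpi2_frame_pool() hands to each command; MSG_SZ and SENSE_SZ are assumed placeholder values for MRSAS_THUNDERBOLT_MSG_SIZE and SENSE_LENGTH, and max_cmd is just an example value taken from the clamp in mrsas_init_adapter_tbolt() further down.

/*
 * Illustration only: assumed sizes, not the driver's headers.
 */
#include <stdio.h>
#include <stdint.h>

#define	MSG_SZ		256u	/* assumed MRSAS_THUNDERBOLT_MSG_SIZE */
#define	SGL_SZ		1024u	/* per-command chained-SGL area, as in the code */
#define	SENSE_SZ	64u	/* assumed SENSE_LENGTH */

int
main(void)
{
	uint32_t max_cmd = 1007;	/* example, after the clamp in mrsas_init_adapter_tbolt() */
	uint32_t i = 5;			/* any command index, 0 <= i < max_cmd */

	/* one extra leading frame keeps SMID 0 unused */
	uint32_t total = MSG_SZ + max_cmd * (MSG_SZ + SGL_SZ + SENSE_SZ);

	/* offsets of command i's message, SGL chain and sense slices */
	uint32_t msg_off = MSG_SZ + i * MSG_SZ;
	uint32_t sgl_off = MSG_SZ + max_cmd * MSG_SZ + i * SGL_SZ;
	uint32_t sense_off = MSG_SZ + max_cmd * (MSG_SZ + SGL_SZ) + i * SENSE_SZ;

	(void) printf("pool=%u cmd=%u msg@%u sgl@%u sense@%u SMID=%u\n",
	    total, i, msg_off, sgl_off, sense_off, i + 1);
	return (0);
}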
291 292
292 293 /*
293 294 * mrsas_tbolt_alloc_additional_dma_buffer - for AEN
294 295 */
295 296 int
296 297 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
297 298 {
298 299 uint32_t internal_buf_size = PAGESIZE*2;
299 300 int i;
300 301
301 302 /* Initialize buffer status as free */
302 303 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
303 304 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
304 305 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
305 306 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
306 307
307 308
308 309 instance->mfi_internal_dma_obj.size = internal_buf_size;
309 310 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
310 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
311 312 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
312 313 0xFFFFFFFFU;
313 314 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
314 315
315 316 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
316 317 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
317 318 cmn_err(CE_WARN,
318 319 "mr_sas: could not alloc reply queue");
319 320 return (DDI_FAILURE);
320 321 }
321 322
322 323 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
323 324
324 325 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
325 326 instance->internal_buf =
326 327 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
327 328 instance->internal_buf_dmac_add =
328 329 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
329 330 instance->internal_buf_size = internal_buf_size;
330 331
331 332 /* allocate evt_detail */
332 333 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
333 334 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
334 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
335 336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
336 337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
337 338 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
338 339
339 340 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
340 341 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
341 342 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
342 343 "could not allocate data transfer buffer.");
343 344 goto fail_tbolt_additional_buff;
344 345 }
345 346
346 347 bzero(instance->mfi_evt_detail_obj.buffer,
347 348 sizeof (struct mrsas_evt_detail));
348 349
349 350 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
350 351
351 352 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
352 353 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
353 354
354 355 for (i = 0; i < 2; i++) {
355 356 /* allocate the data transfer buffer */
356 357 instance->ld_map_obj[i].size = instance->size_map_info;
357 358 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
358 359 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
359 360 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
360 361 0xFFFFFFFFU;
361 362 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
362 363 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
363 364
364 365 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
365 366 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
366 367 cmn_err(CE_WARN,
367 368 "could not allocate data transfer buffer.");
368 369 goto fail_tbolt_additional_buff;
369 370 }
370 371
371 372 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
372 373
373 374 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
374 375
375 376 instance->ld_map[i] =
376 377 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
377 378 instance->ld_map_phy[i] = (uint32_t)instance->
378 379 ld_map_obj[i].dma_cookie[0].dmac_address;
379 380
380 381 con_log(CL_DLEVEL3, (CE_NOTE,
381 382 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
382 383
383 384 con_log(CL_DLEVEL3, (CE_NOTE,
384 385 "size_map_info 0x%x", instance->size_map_info));
385 386 }
386 387
387 388 return (DDI_SUCCESS);
388 389
389 390 fail_tbolt_additional_buff:
390 391 mrsas_tbolt_free_additional_dma_buffer(instance);
391 392
392 393 return (DDI_FAILURE);
393 394 }
394 395
395 396 MRSAS_REQUEST_DESCRIPTOR_UNION *
396 397 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
397 398 {
398 399 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
399 400
400 401 if (index > instance->max_fw_cmds) {
401 402 con_log(CL_ANN1, (CE_NOTE,
402 403 "Invalid SMID 0x%x request for descriptor", index));
403 404 con_log(CL_ANN1, (CE_NOTE,
404 405 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
405 406 return (NULL);
406 407 }
407 408
408 409 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
409 410 ((char *)instance->request_message_pool +
410 411 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
411 412
412 413 con_log(CL_ANN1, (CE_NOTE,
413 414 "request descriptor : 0x%08lx", (unsigned long)req_desc));
414 415
415 416 con_log(CL_ANN1, (CE_NOTE,
416 417 "request descriptor base phy : 0x%08lx",
417 418 (unsigned long)instance->request_message_pool_phy));
418 419
419 420 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
420 421 }
421 422
422 423
423 424 /*
424 425 * Allocate Request and Reply Queue Descriptors.
425 426 */
426 427 int
427 428 alloc_req_rep_desc(struct mrsas_instance *instance)
428 429 {
429 430 uint32_t request_q_sz, reply_q_sz;
430 431 int i, max_reply_q_sz;
431 432 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
432 433
433 434 /*
434 435 * ThunderBolt(TB): there is no longer a producer/consumer mechanism.
435 436 * Once we get an interrupt we are supposed to scan through the list of
436 437 * reply descriptors and process them accordingly, so we need to
437 438 * allocate memory for 1024 reply descriptors.
438 439 */
439 440
440 441 /* Allocate Reply Descriptors */
441 442 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
442 443 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
443 444
444 445 /* reply queue size should be multiple of 16 */
445 446 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
446 447
447 448 reply_q_sz = 8 * max_reply_q_sz;
448 449
449 450
450 451 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
451 452 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
452 453
453 454 instance->reply_desc_dma_obj.size = reply_q_sz;
454 455 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
455 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
456 457 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
457 458 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
458 459 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
459 460
460 461 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
461 462 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
462 463 cmn_err(CE_WARN,
463 464 "mr_sas: could not alloc reply queue");
464 465 return (DDI_FAILURE);
465 466 }
466 467
467 468 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
468 469 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
469 470
470 471 /* virtual address of reply queue */
471 472 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
472 473 instance->reply_desc_dma_obj.buffer);
473 474
474 475 instance->reply_q_depth = max_reply_q_sz;
475 476
476 477 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
477 478 instance->reply_q_depth));
478 479
479 480 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
480 481 (void *)instance->reply_frame_pool));
481 482
482 483 /* initializing reply address to 0xFFFFFFFF */
483 484 reply_desc = instance->reply_frame_pool;
484 485
485 486 for (i = 0; i < instance->reply_q_depth; i++) {
486 487 reply_desc->Words = (uint64_t)~0;
487 488 reply_desc++;
488 489 }
489 490
490 491
491 492 instance->reply_frame_pool_phy =
492 493 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
493 494
494 495 con_log(CL_ANN1, (CE_NOTE,
495 496 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
496 497
497 498
498 499 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
499 500 reply_q_sz);
500 501
501 502 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
502 503 instance->reply_pool_limit_phy));
503 504
504 505
505 506 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
506 507 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 508
508 509 /* Allocate Request Descriptors */
509 510 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
510 511 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
511 512
512 513 request_q_sz = 8 *
513 514 (instance->max_fw_cmds);
514 515
515 516 instance->request_desc_dma_obj.size = request_q_sz;
516 517 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
517 518 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
518 519 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
519 520 0xFFFFFFFFU;
520 521 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
521 522 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
522 523
523 524 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
524 525 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
525 526 cmn_err(CE_WARN,
526 527 "mr_sas: could not alloc request queue desc");
527 528 goto fail_undo_reply_queue;
528 529 }
529 530
530 531 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
531 532 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
532 533
533 534 /* virtual address of request queue desc */
534 535 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
535 536 (instance->request_desc_dma_obj.buffer);
536 537
537 538 instance->request_message_pool_phy =
538 539 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
539 540
540 541 return (DDI_SUCCESS);
541 542
542 543 fail_undo_reply_queue:
543 544 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
544 545 (void) mrsas_free_dma_obj(instance,
545 546 instance->reply_desc_dma_obj);
546 547 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
547 548 }
548 549
549 550 return (DDI_FAILURE);
550 551 }
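Editorial aside (not part of the webrev): the comment at the top of alloc_req_rep_desc() explains that there is no producer/consumer pair for replies, which is why every descriptor is pre-set to ~0 above. The stand-alone model below shows the kind of scan that implies; it is illustrative only and is not the driver's mr_sas_tbolt_process_outstanding_cmd().

/*
 * Toy model of the reply ring: 64-bit words pre-set to ~0, scanned until an
 * entry that the firmware has not yet overwritten is reached.
 */
#include <stdio.h>
#include <stdint.h>

#define	RING_DEPTH	16

static uint64_t ring[RING_DEPTH];
static unsigned read_index;

static void
scan_reply_ring(void)
{
	while (ring[read_index] != ~(uint64_t)0) {
		(void) printf("slot %u completed, descriptor 0x%llx\n",
		    read_index, (unsigned long long)ring[read_index]);
		ring[read_index] = ~(uint64_t)0;	/* hand the slot back */
		read_index = (read_index + 1) % RING_DEPTH;
	}
}

int
main(void)
{
	unsigned i;

	for (i = 0; i < RING_DEPTH; i++)
		ring[i] = ~(uint64_t)0;	/* as alloc_req_rep_desc() initializes */
	ring[0] = 0x1234;		/* pretend the firmware posted two replies */
	ring[1] = 0x5678;
	scan_reply_ring();
	return (0);
}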
551 552
552 553 /*
553 554 * mrsas_alloc_cmd_pool_tbolt
554 555 *
555 556 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
556 557 * routine
557 558 */
558 559 int
559 560 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
560 561 {
561 562 int i;
562 563 int count;
563 564 uint32_t max_cmd;
564 565 uint32_t reserve_cmd;
565 566 size_t sz;
566 567
567 568 struct mrsas_cmd *cmd;
568 569
569 570 max_cmd = instance->max_fw_cmds;
570 571 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
571 572 "max_cmd %x", max_cmd));
572 573
573 574
574 575 sz = sizeof (struct mrsas_cmd *) * max_cmd;
575 576
576 577 /*
577 578 * instance->cmd_list is an array of struct mrsas_cmd pointers.
578 579 * Allocate the dynamic array first and then allocate individual
579 580 * commands.
580 581 */
581 582 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
582 583
583 584 /* create a frame pool and assign one frame to each cmd */
584 585 for (count = 0; count < max_cmd; count++) {
585 586 instance->cmd_list[count] =
586 587 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
587 588 }
588 589
589 590 /* add all the commands to command pool */
590 591
591 592 INIT_LIST_HEAD(&instance->cmd_pool_list);
592 593 INIT_LIST_HEAD(&instance->cmd_pend_list);
593 594 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
594 595
595 596 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
596 597
597 598 /* cmd index 0 is reserved for IOC INIT */
598 599 for (i = 1; i < reserve_cmd; i++) {
599 600 cmd = instance->cmd_list[i];
600 601 cmd->index = i;
601 602 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
602 603 }
603 604
604 605
605 606 for (i = reserve_cmd; i < max_cmd; i++) {
606 607 cmd = instance->cmd_list[i];
607 608 cmd->index = i;
608 609 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
609 610 }
610 611
611 612 return (DDI_SUCCESS);
612 613
613 614 mrsas_undo_cmds:
614 615 if (count > 0) {
615 616 /* free each cmd */
616 617 for (i = 0; i < count; i++) {
617 618 if (instance->cmd_list[i] != NULL) {
618 619 kmem_free(instance->cmd_list[i],
619 620 sizeof (struct mrsas_cmd));
620 621 }
621 622 instance->cmd_list[i] = NULL;
622 623 }
623 624 }
624 625
625 626 mrsas_undo_cmd_list:
626 627 if (instance->cmd_list != NULL)
627 628 kmem_free(instance->cmd_list, sz);
628 629 instance->cmd_list = NULL;
629 630
630 631 return (DDI_FAILURE);
631 632 }
632 633
633 634
634 635 /*
635 636 * free_space_for_mpi2
636 637 */
637 638 void
638 639 free_space_for_mpi2(struct mrsas_instance *instance)
639 640 {
640 641 /* already freed */
641 642 if (instance->cmd_list == NULL) {
642 643 return;
643 644 }
644 645
645 646 /* First free the additional DMA buffer */
646 647 mrsas_tbolt_free_additional_dma_buffer(instance);
647 648
648 649 /* Free the request/reply descriptor pool */
649 650 free_req_rep_desc_pool(instance);
650 651
651 652 /* Free the MPI message pool */
652 653 destroy_mpi2_frame_pool(instance);
653 654
654 655 /* Free the MFI frame pool */
655 656 destroy_mfi_frame_pool(instance);
656 657
657 658 /* Free all the commands in the cmd_list */
658 659 /* Free the cmd_list buffer itself */
659 660 mrsas_free_cmd_pool(instance);
660 661 }
661 662
662 663
663 664 /*
664 665 * ThunderBolt(TB) memory allocations for commands/messages/frames.
665 666 */
666 667 int
667 668 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 669 {
669 670 /* Allocate command pool (memory for cmd_list & individual commands) */
670 671 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 672 cmn_err(CE_WARN, "Error creating cmd pool");
672 673 return (DDI_FAILURE);
673 674 }
674 675
675 676 /* Initialize single reply size and Message size */
676 677 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 678 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 679
679 680 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 681 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 682 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 683 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 684 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 685
685 686 /* Reduce SG count by 1 to take care of group cmds feature in FW */
686 687 instance->max_num_sge = (instance->max_sge_in_main_msg +
687 688 instance->max_sge_in_chain - 2);
688 689 instance->chain_offset_mpt_msg =
689 690 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 691 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 692 sizeof (MPI2_SGE_IO_UNION)) / 16;
692 693 instance->reply_read_index = 0;
693 694
694 695
695 696 /* Allocate Request and Reply descriptors Array */
696 697 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
697 698 if (alloc_req_rep_desc(instance)) {
698 699 cmn_err(CE_WARN,
699 700 "Error, allocating memory for descriptor-pool");
700 701 goto mpi2_undo_cmd_pool;
701 702 }
702 703 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 704 instance->request_message_pool_phy));
704 705
705 706
706 707 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 708 if (create_mfi_frame_pool(instance)) {
708 709 cmn_err(CE_WARN,
709 710 "Error, allocating memory for MFI frame-pool");
710 711 goto mpi2_undo_descripter_pool;
711 712 }
712 713
713 714
714 715 /* Allocate MPI2 Message pool */
715 716 /*
716 717 * Make sure the buffer is aligned to 256 for the raid message packet;
717 718 * create an io request pool and assign one frame to each cmd.
718 719 */
719 720
720 721 if (create_mpi2_frame_pool(instance)) {
721 722 cmn_err(CE_WARN,
722 723 "Error, allocating memory for MPI2 Message-pool");
723 724 goto mpi2_undo_mfi_frame_pool;
724 725 }
725 726
726 727 #ifdef DEBUG
727 728 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 729 instance->max_sge_in_main_msg));
729 730 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 731 instance->max_sge_in_chain));
731 732 con_log(CL_ANN1, (CE_CONT,
732 733 "[max_sge]0x%x", instance->max_num_sge));
733 734 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 735 instance->chain_offset_mpt_msg));
735 736 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 737 instance->chain_offset_io_req));
737 738 #endif
738 739
739 740
740 741 /* Allocate additional dma buffer */
741 742 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 743 cmn_err(CE_WARN,
743 744 "Error, allocating tbolt additional DMA buffer");
744 745 goto mpi2_undo_message_pool;
745 746 }
746 747
747 748 return (DDI_SUCCESS);
748 749
749 750 mpi2_undo_message_pool:
750 751 destroy_mpi2_frame_pool(instance);
751 752
752 753 mpi2_undo_mfi_frame_pool:
753 754 destroy_mfi_frame_pool(instance);
754 755
755 756 mpi2_undo_descripter_pool:
756 757 free_req_rep_desc_pool(instance);
757 758
758 759 mpi2_undo_cmd_pool:
759 760 mrsas_free_cmd_pool(instance);
760 761
761 762 return (DDI_FAILURE);
762 763 }
763 764
764 765
765 766 /*
766 767 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767 768 */
768 769 int
769 770 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
770 771 {
771 772
772 773 /*
773 774 * Reduce the max supported cmds by 1. This is to ensure that the
774 775 * reply_q_sz (1 more than the max cmd that driver may send)
775 776 * does not exceed max cmds that the FW can support
776 777 */
777 778
778 779 if (instance->max_fw_cmds > 1008) {
779 780 instance->max_fw_cmds = 1008;
780 781 instance->max_fw_cmds = instance->max_fw_cmds-1;
781 782 }
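Editorial worked example (not part of the webrev): if the firmware reports more than 1008 commands, max_fw_cmds ends up at 1007 here. alloc_req_rep_desc() then rounds 1007 + 1 up to a multiple of 16, giving ((1007 + 1 + 15) / 16) * 16 = 1008 reply descriptors (8 * 1008 = 8064 bytes) against 8 * 1007 = 8056 bytes of request descriptors, so the reply queue depth never exceeds what the firmware advertised.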
782 783
783 784 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 785 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 786
786 787
787 788 /* create a pool of commands */
788 789 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 790 cmn_err(CE_WARN,
790 791 " alloc_space_for_mpi2() failed.");
791 792
792 793 return (DDI_FAILURE);
793 794 }
794 795
795 796 /* Send ioc init message */
796 797 /* NOTE: the issue_init call does FMA checking already. */
797 798 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
798 799 cmn_err(CE_WARN,
799 800 " mrsas_issue_init_mpi2() failed.");
800 801
801 802 goto fail_init_fusion;
802 803 }
803 804
804 805 instance->unroll.alloc_space_mpi2 = 1;
805 806
806 807 con_log(CL_ANN, (CE_NOTE,
807 808 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
808 809
809 810 return (DDI_SUCCESS);
810 811
811 812 fail_init_fusion:
812 813 free_space_for_mpi2(instance);
813 814
814 815 return (DDI_FAILURE);
815 816 }
816 817
817 818
818 819
819 820 /*
820 821 * init_mpi2
821 822 */
822 823 int
823 824 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
824 825 {
825 826 dma_obj_t init2_dma_obj;
826 827 int ret_val = DDI_SUCCESS;
827 828
828 829 /* allocate DMA buffer for IOC INIT message */
829 830 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
830 831 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
831 832 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
832 833 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
833 834 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
834 835 init2_dma_obj.dma_attr.dma_attr_align = 256;
835 836
836 837 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
837 838 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
838 839 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
839 840 "could not allocate data transfer buffer.");
840 841 return (DDI_FAILURE);
841 842 }
842 843 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
843 844
844 845 con_log(CL_ANN1, (CE_NOTE,
845 846 "mrsas_issue_init_mpi2 _phys adr: %x",
846 847 init2_dma_obj.dma_cookie[0].dmac_address));
847 848
848 849
849 850 /* Initialize and send ioc init message */
850 851 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
851 852 if (ret_val == DDI_FAILURE) {
852 853 con_log(CL_ANN1, (CE_WARN,
853 854 "mrsas_issue_init_mpi2: Failed"));
854 855 goto fail_init_mpi2;
855 856 }
856 857
857 858 /* free IOC init DMA buffer */
858 859 if (mrsas_free_dma_obj(instance, init2_dma_obj)
859 860 != DDI_SUCCESS) {
860 861 con_log(CL_ANN1, (CE_WARN,
861 862 "mrsas_issue_init_mpi2: Free Failed"));
862 863 return (DDI_FAILURE);
863 864 }
864 865
865 866 /* Get/Check and sync ld_map info */
866 867 instance->map_id = 0;
867 868 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
868 869 (void) mrsas_tbolt_sync_map_info(instance);
869 870
870 871
871 872 /* No mrsas_cmd to send, so send NULL. */
872 873 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
873 874 goto fail_init_mpi2;
874 875
875 876 con_log(CL_ANN, (CE_NOTE,
876 877 "mrsas_issue_init_mpi2: SUCCESSFUL"));
877 878
878 879 return (DDI_SUCCESS);
879 880
880 881 fail_init_mpi2:
881 882 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
882 883
883 884 return (DDI_FAILURE);
884 885 }
885 886
886 887 static int
887 888 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
888 889 {
889 890 int numbytes;
890 891 uint16_t flags;
891 892 struct mrsas_init_frame2 *mfiFrameInit2;
892 893 struct mrsas_header *frame_hdr;
893 894 Mpi2IOCInitRequest_t *init;
894 895 struct mrsas_cmd *cmd = NULL;
895 896 struct mrsas_drv_ver drv_ver_info;
896 897 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
897 898
898 899 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
899 900
900 901
901 902 #ifdef DEBUG
902 903 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
903 904 (int)sizeof (*mfiFrameInit2)));
904 905 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
905 906 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
906 907 (int)sizeof (struct mrsas_init_frame2)));
907 908 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
908 909 (int)sizeof (Mpi2IOCInitRequest_t)));
909 910 #endif
910 911
911 912 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
912 913 numbytes = sizeof (*init);
913 914 bzero(init, numbytes);
914 915
915 916 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
916 917 MPI2_FUNCTION_IOC_INIT);
917 918
918 919 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
919 920 MPI2_WHOINIT_HOST_DRIVER);
920 921
921 922 /* set MsgVersion and HeaderVersion host driver was built with */
922 923 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
923 924 MPI2_VERSION);
924 925
925 926 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
926 927 MPI2_HEADER_VERSION);
927 928
928 929 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
929 930 instance->raid_io_msg_size / 4);
930 931
931 932 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
932 933 0);
933 934
934 935 ddi_put16(mpi2_dma_obj->acc_handle,
935 936 &init->ReplyDescriptorPostQueueDepth,
936 937 instance->reply_q_depth);
937 938 /*
938 939 * These addresses are set using the DMA cookie addresses from when the
939 940 * memory was allocated. Sense buffer hi address should be 0.
940 941 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
941 942 */
942 943
943 944 ddi_put32(mpi2_dma_obj->acc_handle,
944 945 &init->SenseBufferAddressHigh, 0);
945 946
946 947 ddi_put64(mpi2_dma_obj->acc_handle,
947 948 (uint64_t *)&init->SystemRequestFrameBaseAddress,
948 949 instance->io_request_frames_phy);
949 950
950 951 ddi_put64(mpi2_dma_obj->acc_handle,
951 952 &init->ReplyDescriptorPostQueueAddress,
952 953 instance->reply_frame_pool_phy);
953 954
954 955 ddi_put64(mpi2_dma_obj->acc_handle,
955 956 &init->ReplyFreeQueueAddress, 0);
956 957
957 958 cmd = instance->cmd_list[0];
958 959 if (cmd == NULL) {
959 960 return (DDI_FAILURE);
960 961 }
961 962 cmd->retry_count_for_ocr = 0;
962 963 cmd->pkt = NULL;
963 964 cmd->drv_pkt_time = 0;
964 965
965 966 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
966 967 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
967 968
968 969 frame_hdr = &cmd->frame->hdr;
969 970
970 971 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
971 972 MFI_CMD_STATUS_POLL_MODE);
972 973
973 974 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
974 975
975 976 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
976 977
977 978 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
978 979
979 980 con_log(CL_ANN, (CE_CONT,
980 981 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
981 982
982 983 /* Init the MFI Header */
983 984 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
984 985 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
985 986
986 987 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
987 988
988 989 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
989 990 &mfiFrameInit2->cmd_status,
990 991 MFI_STAT_INVALID_STATUS);
991 992
992 993 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
993 994
994 995 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 996 &mfiFrameInit2->queue_info_new_phys_addr_lo,
996 997 mpi2_dma_obj->dma_cookie[0].dmac_address);
997 998
998 999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 1000 &mfiFrameInit2->data_xfer_len,
1000 1001 sizeof (Mpi2IOCInitRequest_t));
1001 1002
1002 1003 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1003 1004 (int)init->ReplyDescriptorPostQueueAddress));
1004 1005
1005 1006 /* fill driver version information */
1006 1007 fill_up_drv_ver(&drv_ver_info);
1007 1008
1008 1009 /* allocate the driver version data transfer buffer */
1009 1010 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1010 1011 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1011 1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1012 1013 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1013 1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1014 1015 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1015 1016
1016 1017 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1017 1018 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1018 1019 cmn_err(CE_WARN,
1019 1020 "fusion init: Could not allocate driver version buffer.");
1020 1021 return (DDI_FAILURE);
1021 1022 }
1022 1023 /* copy driver version to dma buffer */
1023 1024 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1024 1025 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1025 1026 (uint8_t *)drv_ver_info.drv_ver,
1026 1027 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1027 1028 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1028 1029
1029 1030 /* send driver version physical address to firmware */
1030 1031 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1031 1032 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1032 1033
1033 1034 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1034 1035 mfiFrameInit2->queue_info_new_phys_addr_lo,
1035 1036 (int)sizeof (Mpi2IOCInitRequest_t)));
1036 1037
1037 1038 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1038 1039
1039 1040 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1040 1041 cmd->scsi_io_request_phys_addr,
1041 1042 (int)sizeof (struct mrsas_init_frame2)));
1042 1043
1043 1044 /* disable interrupts before sending INIT2 frame */
1044 1045 instance->func_ptr->disable_intr(instance);
1045 1046
1046 1047 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1047 1048 instance->request_message_pool;
1048 1049 req_desc->Words = cmd->scsi_io_request_phys_addr;
1049 1050 req_desc->MFAIo.RequestFlags =
1050 1051 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1051 1052
1052 1053 cmd->request_desc = req_desc;
1053 1054
1054 1055 /* issue the init frame */
1055 1056 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1056 1057
1057 1058 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1058 1059 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1059 1060 frame_hdr->cmd_status));
1060 1061
1061 1062 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1062 1063 &mfiFrameInit2->cmd_status) == 0) {
1063 1064 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1064 1065 } else {
1065 1066 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1066 1067 mrsas_dump_reply_desc(instance);
1067 1068 goto fail_ioc_init;
1068 1069 }
1069 1070
1070 1071 mrsas_dump_reply_desc(instance);
1071 1072
1072 1073 instance->unroll.verBuff = 1;
1073 1074
1074 1075 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1075 1076
1076 1077 return (DDI_SUCCESS);
1077 1078
1078 1079
1079 1080 fail_ioc_init:
1080 1081
1081 1082 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1082 1083
1083 1084 return (DDI_FAILURE);
1084 1085 }
1085 1086
1086 1087 int
1087 1088 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1088 1089 {
1089 1090 int i;
1090 1091 uint32_t wait_time = dump_io_wait_time;
1091 1092 for (i = 0; i < wait_time; i++) {
1092 1093 /*
1093 1094 * Check For Outstanding poll Commands
1094 1095 * except ldsync command and aen command
1095 1096 */
1096 1097 if (instance->fw_outstanding <= 2) {
1097 1098 break;
1098 1099 }
1099 1100 drv_usecwait(10*MILLISEC);
1100 1101 /* complete commands from reply queue */
1101 1102 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1102 1103 }
1103 1104 if (instance->fw_outstanding > 2) {
1104 1105 return (1);
1105 1106 }
1106 1107 return (0);
1107 1108 }
1108 1109 /*
1109 1110 * scsi_pkt handling
1110 1111 *
1111 1112 * Visible to the external world via the transport structure.
1112 1113 */
1113 1114
1114 1115 int
1115 1116 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1116 1117 {
1117 1118 struct mrsas_instance *instance = ADDR2MR(ap);
1118 1119 struct scsa_cmd *acmd = PKT2CMD(pkt);
1119 1120 struct mrsas_cmd *cmd = NULL;
1120 1121 uchar_t cmd_done = 0;
1121 1122
1122 1123 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1123 1124 if (instance->deadadapter == 1) {
1124 1125 cmn_err(CE_WARN,
1125 1126 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1126 1127 "for IO, as the HBA doesn't take any more IOs");
1127 1128 if (pkt) {
1128 1129 pkt->pkt_reason = CMD_DEV_GONE;
1129 1130 pkt->pkt_statistics = STAT_DISCON;
1130 1131 }
1131 1132 return (TRAN_FATAL_ERROR);
1132 1133 }
1133 1134 if (instance->adapterresetinprogress) {
1134 1135 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1135 1136 "returning mfi_pkt and setting TRAN_BUSY\n"));
1136 1137 return (TRAN_BUSY);
1137 1138 }
1138 1139 (void) mrsas_tbolt_prepare_pkt(acmd);
1139 1140
1140 1141 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1141 1142
1142 1143 /*
1143 1144 * Check if the command was already completed by the mrsas_build_cmd()
1144 1145 * routine, in which case the busy_flag will be clear, scb will be
1145 1146 * NULL, and an appropriate reason is provided in the pkt_reason field.
1146 1147 */
1147 1148 if (cmd_done) {
1148 1149 pkt->pkt_reason = CMD_CMPLT;
1149 1150 pkt->pkt_scbp[0] = STATUS_GOOD;
1150 1151 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1151 1152 | STATE_SENT_CMD;
1152 1153 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1153 1154 (*pkt->pkt_comp)(pkt);
1154 1155 }
1155 1156
1156 1157 return (TRAN_ACCEPT);
1157 1158 }
1158 1159
1159 1160 if (cmd == NULL) {
1160 1161 return (TRAN_BUSY);
1161 1162 }
1162 1163
1163 1164
1164 1165 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1165 1166 if (instance->fw_outstanding > instance->max_fw_cmds) {
1166 1167 cmn_err(CE_WARN,
1167 1168 "Command Queue Full... Returning BUSY");
1168 1169 return_raid_msg_pkt(instance, cmd);
1169 1170 return (TRAN_BUSY);
1170 1171 }
1171 1172
1172 1173 /* Synchronize the Cmd frame for the controller */
1173 1174 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1174 1175 DDI_DMA_SYNC_FORDEV);
1175 1176
1176 1177 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1177 1178 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1178 1179 cmd->index, cmd->SMID));
1179 1180
1180 1181 instance->func_ptr->issue_cmd(cmd, instance);
1181 1182 } else {
1182 1183 instance->func_ptr->issue_cmd(cmd, instance);
1183 1184 (void) wait_for_outstanding_poll_io(instance);
1184 1185 (void) mrsas_common_check(instance, cmd);
1185 1186 }
1186 1187
1187 1188 return (TRAN_ACCEPT);
1188 1189 }
1189 1190
1190 1191 /*
1191 1192 * prepare the pkt:
1192 1193 * the pkt may have been resubmitted or just reused so
1193 1194 * initialize some fields and do some checks.
1194 1195 */
1195 1196 static int
1196 1197 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1197 1198 {
1198 1199 struct scsi_pkt *pkt = CMD2PKT(acmd);
1199 1200
1200 1201
1201 1202 /*
1202 1203 * Reinitialize some fields that need it; the packet may
1203 1204 * have been resubmitted
1204 1205 */
1205 1206 pkt->pkt_reason = CMD_CMPLT;
1206 1207 pkt->pkt_state = 0;
1207 1208 pkt->pkt_statistics = 0;
1208 1209 pkt->pkt_resid = 0;
1209 1210
1210 1211 /*
1211 1212 * zero status byte.
1212 1213 */
1213 1214 *(pkt->pkt_scbp) = 0;
1214 1215
1215 1216 return (0);
1216 1217 }
1217 1218
1218 1219
1219 1220 int
1220 1221 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1221 1222 struct scsa_cmd *acmd,
1222 1223 struct mrsas_cmd *cmd,
1223 1224 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1224 1225 uint32_t *datalen)
1225 1226 {
1226 1227 uint32_t MaxSGEs;
1227 1228 int sg_to_process;
1228 1229 uint32_t i, j;
1229 1230 uint32_t numElements, endElement;
1230 1231 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1231 1232 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1232 1233 ddi_acc_handle_t acc_handle =
1233 1234 instance->mpi2_frame_pool_dma_obj.acc_handle;
1235 + uint16_t devid = instance->device_id;
1234 1236
1235 1237 con_log(CL_ANN1, (CE_NOTE,
1236 1238 "chkpnt: Building Chained SGL :%d", __LINE__));
1237 1239
1238 1240 /* Calculate SGE size in number of Words (32bit) */
1239 1241 /* Clear the datalen before updating it. */
1240 1242 *datalen = 0;
1241 1243
1242 1244 MaxSGEs = instance->max_sge_in_main_msg;
1243 1245
1244 1246 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1245 1247 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1246 1248
1247 1249 /* set data transfer flag. */
1248 1250 if (acmd->cmd_flags & CFLAG_DMASEND) {
1249 1251 ddi_put32(acc_handle, &scsi_raid_io->Control,
1250 1252 MPI2_SCSIIO_CONTROL_WRITE);
1251 1253 } else {
1252 1254 ddi_put32(acc_handle, &scsi_raid_io->Control,
1253 1255 MPI2_SCSIIO_CONTROL_READ);
1254 1256 }
1255 1257
1256 1258
1257 1259 numElements = acmd->cmd_cookiecnt;
1258 1260
1259 1261 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1260 1262
1261 1263 if (numElements > instance->max_num_sge) {
1262 1264 con_log(CL_ANN, (CE_NOTE,
1263 1265 "[Max SGE Count Exceeded]:%x", numElements));
1264 1266 return (numElements);
1265 1267 }
1266 1268
1267 1269 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1268 1270 (uint8_t)numElements);
1269 1271
1270 1272 /* set end element in main message frame */
1271 1273 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1272 1274
1273 1275 /* prepare the scatter-gather list for the firmware */
1274 1276 scsi_raid_io_sgl_ieee =
1275 1277 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1276 1278
1277 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1279 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1280 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1278 1281 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1279 1282 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1280 1283
1281 1284 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1282 1285 }
1283 1286
1284 1287 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1285 1288 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1286 1289 acmd->cmd_dmacookies[i].dmac_laddress);
1287 1290
1288 1291 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1289 1292 acmd->cmd_dmacookies[i].dmac_size);
1290 1293
1291 1294 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1292 1295
1293 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1296 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1297 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1294 1298 if (i == (numElements - 1)) {
1295 1299 ddi_put8(acc_handle,
1296 1300 &scsi_raid_io_sgl_ieee->Flags,
1297 1301 IEEE_SGE_FLAGS_END_OF_LIST);
1298 1302 }
1299 1303 }
1300 1304
1301 1305 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1302 1306
1303 1307 #ifdef DEBUG
1304 1308 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1305 1309 scsi_raid_io_sgl_ieee->Address));
1306 1310 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1307 1311 scsi_raid_io_sgl_ieee->Length));
1308 1312 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1309 1313 scsi_raid_io_sgl_ieee->Flags));
1310 1314 #endif
1311 1315
1312 1316 }
1313 1317
1314 1318 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1315 1319
1316 1320 /* check if chained SGL required */
1317 1321 if (i < numElements) {
1318 1322
1319 1323 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1320 1324
1321 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1325 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1326 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1322 1327 uint16_t ioFlags =
1323 1328 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1324 1329
1325 1330 if ((ioFlags &
1326 1331 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1327 1332 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1328 1333 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1329 1334 (U8)instance->chain_offset_io_req);
1330 1335 } else {
1331 1336 ddi_put8(acc_handle,
1332 1337 &scsi_raid_io->ChainOffset, 0);
1333 1338 }
1334 1339 } else {
1335 1340 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1336 1341 (U8)instance->chain_offset_io_req);
1337 1342 }
1338 1343
1339 1344 /* prepare physical chain element */
1340 1345 ieeeChainElement = scsi_raid_io_sgl_ieee;
1341 1346
1342 1347 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1343 1348
1344 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1349 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1350 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1345 1351 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1346 1352 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1347 1353 } else {
1348 1354 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1349 1355 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1350 1356 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1351 1357 }
1352 1358
1353 1359 ddi_put32(acc_handle, &ieeeChainElement->Length,
1354 1360 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1355 1361
1356 1362 ddi_put64(acc_handle, &ieeeChainElement->Address,
1357 1363 (U64)cmd->sgl_phys_addr);
1358 1364
1359 1365 sg_to_process = numElements - i;
1360 1366
1361 1367 con_log(CL_ANN1, (CE_NOTE,
1362 1368 "[Additional SGE Count]:%x", endElement));
1363 1369
1364 1370 /* point to the chained SGL buffer */
1365 1371 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1366 1372
1367 1373 /* build rest of the SGL in chained buffer */
1368 1374 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1369 1375 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1370 1376
1371 1377 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1372 1378 acmd->cmd_dmacookies[i].dmac_laddress);
1373 1379
1374 1380 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1375 1381 acmd->cmd_dmacookies[i].dmac_size);
1376 1382
1377 1383 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1378 1384
1379 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1385 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1386 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1380 1387 if (i == (numElements - 1)) {
1381 1388 ddi_put8(acc_handle,
1382 1389 &scsi_raid_io_sgl_ieee->Flags,
1383 1390 IEEE_SGE_FLAGS_END_OF_LIST);
1384 1391 }
1385 1392 }
1386 1393
1387 1394 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1388 1395
1389 1396 #if DEBUG
1390 1397 con_log(CL_DLEVEL1, (CE_NOTE,
1391 1398 "[SGL Address]: %" PRIx64,
1392 1399 scsi_raid_io_sgl_ieee->Address));
1393 1400 con_log(CL_DLEVEL1, (CE_NOTE,
1394 1401 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1395 1402 con_log(CL_DLEVEL1, (CE_NOTE,
1396 1403 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1397 1404 #endif
1398 1405
1399 1406 i++;
1400 1407 }
1401 1408 }
1402 1409
1403 1410 return (0);
1404 1411 } /*end of BuildScatterGather */
1405 1412
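Editorial aside (not part of the webrev): the Invader-or-Fury test introduced by this change is repeated at every SGE and chain-offset decision above, and again in mrsas_tbolt_build_cmd() below. A hedged sketch of collapsing it into one predicate follows; the helper name is hypothetical and the PCI_DEVICE_ID_LSI_* macros are assumed to come from the driver's headers.

/*
 * Hypothetical helper, illustration only: Invader and Fury are handled
 * identically wherever this file tests the device id.
 */
static int
mrsas_is_invader_or_fury(uint16_t devid)
{
	return (devid == PCI_DEVICE_ID_LSI_INVADER ||
	    devid == PCI_DEVICE_ID_LSI_FURY);
}

Each of the expanded tests above would then read simply: if (mrsas_is_invader_or_fury(devid)) { ... }.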
1406 1413
1407 1414 /*
1408 1415 * build_cmd
1409 1416 */
1410 1417 static struct mrsas_cmd *
1411 1418 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1412 1419 struct scsi_pkt *pkt, uchar_t *cmd_done)
1413 1420 {
1414 1421 uint8_t fp_possible = 0;
1415 1422 uint32_t index;
1416 1423 uint32_t lba_count = 0;
1417 1424 uint32_t start_lba_hi = 0;
1418 1425 uint32_t start_lba_lo = 0;
1426 + uint16_t devid = instance->device_id;
1419 1427 ddi_acc_handle_t acc_handle =
1420 1428 instance->mpi2_frame_pool_dma_obj.acc_handle;
1421 1429 struct mrsas_cmd *cmd = NULL;
1422 1430 struct scsa_cmd *acmd = PKT2CMD(pkt);
1423 1431 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1424 1432 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1425 1433 uint32_t datalen;
1426 1434 struct IO_REQUEST_INFO io_info;
1427 1435 MR_FW_RAID_MAP_ALL *local_map_ptr;
1428 1436 uint16_t pd_cmd_cdblen;
1429 1437
1430 1438 con_log(CL_DLEVEL1, (CE_NOTE,
1431 1439 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1432 1440
1433 1441 /* find out if this is logical or physical drive command. */
1434 1442 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1435 1443 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1436 1444
1437 1445 *cmd_done = 0;
1438 1446
1439 1447 /* get the command packet */
1440 1448 if (!(cmd = get_raid_msg_pkt(instance))) {
1441 1449 return (NULL);
1442 1450 }
1443 1451
1444 1452 index = cmd->index;
1445 1453 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1446 1454 ReqDescUnion->Words = 0;
1447 1455 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1448 1456 ReqDescUnion->SCSIIO.RequestFlags =
1449 1457 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1450 1458 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1451 1459
1452 1460
1453 1461 cmd->request_desc = ReqDescUnion;
1454 1462 cmd->pkt = pkt;
1455 1463 cmd->cmd = acmd;
1456 1464
1457 1465 /* lets get the command directions */
1458 1466 if (acmd->cmd_flags & CFLAG_DMASEND) {
1459 1467 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1460 1468 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1461 1469 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1462 1470 DDI_DMA_SYNC_FORDEV);
1463 1471 }
1464 1472 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1465 1473 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1466 1474 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1467 1475 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1468 1476 DDI_DMA_SYNC_FORCPU);
1469 1477 }
1470 1478 } else {
1471 1479 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1472 1480 }
1473 1481
1474 1482
1475 1483 /* get SCSI_IO raid message frame pointer */
1476 1484 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1477 1485
1478 1486 /* zero out SCSI_IO raid message frame */
1479 1487 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1480 1488
1481 1489 /* Set the ldTargetId set by BuildRaidContext() */
1482 1490 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1483 1491 acmd->device_id);
1484 1492
1485 1493 /* Copy CDB to scsi_io_request message frame */
1486 1494 ddi_rep_put8(acc_handle,
1487 1495 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1488 1496 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1489 1497
1490 1498 /*
1491 1499 * Just the CDB length, rest of the Flags are zero
1492 1500 * This will be modified later.
1493 1501 */
1494 1502 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1495 1503
1496 1504 pd_cmd_cdblen = acmd->cmd_cdblen;
1497 1505
1498 1506 switch (pkt->pkt_cdbp[0]) {
1499 1507 case SCMD_READ:
1500 1508 case SCMD_WRITE:
1501 1509 case SCMD_READ_G1:
1502 1510 case SCMD_WRITE_G1:
1503 1511 case SCMD_READ_G4:
1504 1512 case SCMD_WRITE_G4:
1505 1513 case SCMD_READ_G5:
1506 1514 case SCMD_WRITE_G5:
1507 1515
1508 1516 if (acmd->islogical) {
1509 1517 /* Initialize sense Information */
1510 1518 if (cmd->sense1 == NULL) {
1511 1519 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1512 1520 "Sense buffer ptr NULL "));
1513 1521 }
1514 1522 bzero(cmd->sense1, SENSE_LENGTH);
1515 1523 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1516 1524 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1517 1525
1518 1526 if (acmd->cmd_cdblen == CDB_GROUP0) {
1519 1527 /* 6-byte cdb */
1520 1528 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1521 1529 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1522 1530 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1523 1531 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1524 1532 << 16));
1525 1533 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1526 1534 /* 10-byte cdb */
1527 1535 lba_count =
1528 1536 (((uint16_t)(pkt->pkt_cdbp[8])) |
1529 1537 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1530 1538
1531 1539 start_lba_lo =
1532 1540 (((uint32_t)(pkt->pkt_cdbp[5])) |
1533 1541 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1534 1542 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1535 1543 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1536 1544
1537 1545 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1538 1546 /* 12-byte cdb */
1539 1547 lba_count = (
1540 1548 ((uint32_t)(pkt->pkt_cdbp[9])) |
1541 1549 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1542 1550 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1543 1551 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1544 1552
1545 1553 start_lba_lo =
1546 1554 (((uint32_t)(pkt->pkt_cdbp[5])) |
1547 1555 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1548 1556 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1549 1557 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1550 1558
1551 1559 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1552 1560 /* 16-byte cdb */
1553 1561 lba_count = (
1554 1562 ((uint32_t)(pkt->pkt_cdbp[13])) |
1555 1563 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1556 1564 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1557 1565 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1558 1566
1559 1567 start_lba_lo = (
1560 1568 ((uint32_t)(pkt->pkt_cdbp[9])) |
1561 1569 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1562 1570 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1563 1571 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1564 1572
1565 1573 start_lba_hi = (
1566 1574 ((uint32_t)(pkt->pkt_cdbp[5])) |
1567 1575 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1568 1576 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1569 1577 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1570 1578 }
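				/*
				 * The big-endian LBA and transfer length are
				 * pulled straight out of the CDB above.  For
				 * example, a 10-byte READ for 8 blocks at LBA
				 * 0x1000 carries 00 00 10 00 in bytes 2-5
				 * (start_lba_lo) and 00 08 in bytes 7-8
				 * (lba_count); only the 16-byte form supplies
				 * the upper 32 LBA bits (start_lba_hi).
				 */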
1571 1579
1572 1580 if (instance->tbolt &&
1573 1581 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1574 1582 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1575 1583 "controller limit 0x%x sectors",
1576 1584 lba_count);
1577 1585 }
1578 1586
1579 1587 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1580 1588 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1581 1589 start_lba_lo;
1582 1590 io_info.numBlocks = lba_count;
1583 1591 io_info.ldTgtId = acmd->device_id;
1584 1592
1585 1593 if (acmd->cmd_flags & CFLAG_DMASEND)
1586 1594 io_info.isRead = 0;
1587 1595 else
1588 1596 io_info.isRead = 1;
1589 1597
1590 1598
1591 1599 /* Acquire SYNC MAP UPDATE lock */
1592 1600 mutex_enter(&instance->sync_map_mtx);
1593 1601
1594 1602 local_map_ptr =
1595 1603 instance->ld_map[(instance->map_id & 1)];
1596 1604
1597 1605 if ((MR_TargetIdToLdGet(
1598 1606 acmd->device_id, local_map_ptr) >=
1599 1607 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1600 1608 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1601 1609 "targetId >= MAX_LOGICAL_DRIVES || "
1602 1610 "!instance->fast_path_io");
1603 1611 fp_possible = 0;
1604 1612 /* Set Regionlock flags to BYPASS */
1605 1613 /* io_request->RaidContext.regLockFlags = 0; */
1606 1614 ddi_put8(acc_handle,
1607 1615 &scsi_raid_io->RaidContext.regLockFlags, 0);
1608 1616 } else {
1609 1617 if (MR_BuildRaidContext(instance, &io_info,
1610 1618 &scsi_raid_io->RaidContext, local_map_ptr))
1611 1619 fp_possible = io_info.fpOkForIo;
1612 1620 }
1613 1621
1614 1622 if (!enable_fp)
1615 1623 fp_possible = 0;
1616 1624
1617 1625 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1618 1626 "instance->fast_path_io %d fp_possible %d",
1619 1627 enable_fp, instance->fast_path_io, fp_possible));
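				/*
				 * fp_possible selects between the two issue
				 * paths below: when set, the driver builds the
				 * physical I/O itself from the RAID map and
				 * sends it as MPI2_FUNCTION_SCSI_IO_REQUEST
				 * directly to the device handle; otherwise the
				 * request is handed to firmware as a regular
				 * MPI2_FUNCTION_LD_IO_REQUEST.
				 */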
1620 1628
1621 1629 if (fp_possible) {
1622 1630
1623 1631 /* Check for DIF enabled LD */
1624 1632 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1625 1633 /* Prepare 32 Byte CDB for DIF capable Disk */
1626 1634 mrsas_tbolt_prepare_cdb(instance,
1627 1635 scsi_raid_io->CDB.CDB32,
1628 1636 &io_info, scsi_raid_io, start_lba_lo);
1629 1637 } else {
1630 1638 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1631 1639 (uint8_t *)&pd_cmd_cdblen,
1632 1640 io_info.pdBlock, io_info.numBlocks);
1633 1641 ddi_put16(acc_handle,
1634 1642 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1635 1643 }
1636 1644
1637 1645 ddi_put8(acc_handle, &scsi_raid_io->Function,
1638 1646 MPI2_FUNCTION_SCSI_IO_REQUEST);
1639 1647
1640 1648 ReqDescUnion->SCSIIO.RequestFlags =
1641 1649 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1642 1650 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1643 1651
1644 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1652 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1653 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1645 1654 uint8_t regLockFlags = ddi_get8(acc_handle,
1646 1655 &scsi_raid_io->RaidContext.regLockFlags);
1647 1656 uint16_t IoFlags = ddi_get16(acc_handle,
1648 1657 &scsi_raid_io->IoFlags);
1649 1658
1650 1659 if (regLockFlags == REGION_TYPE_UNUSED)
1651 1660 ReqDescUnion->SCSIIO.RequestFlags =
1652 1661 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1653 1662 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1654 1663
1655 1664 IoFlags |=
1656 1665 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1657 1666 regLockFlags |=
1658 1667 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1659 1668 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1660 1669
1661 1670 ddi_put8(acc_handle,
1662 1671 &scsi_raid_io->ChainOffset, 0);
1663 1672 ddi_put8(acc_handle,
1664 1673 &scsi_raid_io->RaidContext.nsegType,
1665 1674 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1666 1675 MPI2_TYPE_CUDA));
1667 1676 ddi_put8(acc_handle,
1668 1677 &scsi_raid_io->RaidContext.regLockFlags,
1669 1678 regLockFlags);
1670 1679 ddi_put16(acc_handle,
1671 1680 &scsi_raid_io->IoFlags, IoFlags);
1672 1681 }
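				/*
				 * On Invader/Fury the fast-path descriptor is
				 * downgraded to NO_LOCK when no region lock is
				 * needed, the frame is tagged as a CUDA-type
				 * request with sequence-number region locking,
				 * and the fast-path bit is set in IoFlags;
				 * presumably this is the framing these newer
				 * controllers expect for fast-path I/O.
				 */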
1673 1682
1674 1683 if ((instance->load_balance_info[
1675 1684 acmd->device_id].loadBalanceFlag) &&
1676 1685 (io_info.isRead)) {
1677 1686 io_info.devHandle =
1678 1687 get_updated_dev_handle(&instance->
1679 1688 load_balance_info[acmd->device_id],
1680 1689 &io_info);
1681 1690 cmd->load_balance_flag |=
1682 1691 MEGASAS_LOAD_BALANCE_FLAG;
1683 1692 } else {
1684 1693 cmd->load_balance_flag &=
1685 1694 ~MEGASAS_LOAD_BALANCE_FLAG;
1686 1695 }
1687 1696
1688 1697 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1689 1698 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1690 1699 io_info.devHandle);
1691 1700
1692 1701 } else {
1693 1702 ddi_put8(acc_handle, &scsi_raid_io->Function,
1694 1703 MPI2_FUNCTION_LD_IO_REQUEST);
1695 1704
1696 1705 ddi_put16(acc_handle,
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
1697 1706 &scsi_raid_io->DevHandle, acmd->device_id);
1698 1707
1699 1708 ReqDescUnion->SCSIIO.RequestFlags =
1700 1709 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1701 1710 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1702 1711
1703 1712 ddi_put16(acc_handle,
1704 1713 &scsi_raid_io->RaidContext.timeoutValue,
1705 1714 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1706 1715
1707 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1716 + if ((devid == PCI_DEVICE_ID_LSI_INVADER) ||
1717 + (devid == PCI_DEVICE_ID_LSI_FURY)) {
1708 1718 uint8_t regLockFlags = ddi_get8(acc_handle,
1709 1719 &scsi_raid_io->RaidContext.regLockFlags);
1710 1720
1711 1721 if (regLockFlags == REGION_TYPE_UNUSED) {
1712 1722 ReqDescUnion->SCSIIO.RequestFlags =
1713 1723 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1714 1724 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1715 1725 }
1716 1726
1717 1727 regLockFlags |=
1718 1728 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1719 1729 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1720 1730
1721 1731 ddi_put8(acc_handle,
1722 1732 &scsi_raid_io->RaidContext.nsegType,
1723 1733 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1724 1734 MPI2_TYPE_CUDA));
1725 1735 ddi_put8(acc_handle,
1726 1736 &scsi_raid_io->RaidContext.regLockFlags,
1727 1737 regLockFlags);
1728 1738 }
1729 1739 } /* Not FP */
1730 1740
1731 1741 /* Release SYNC MAP UPDATE lock */
1732 1742 mutex_exit(&instance->sync_map_mtx);
1733 1743
1734 1744
1735 1745 /*
1736 1746 * Set sense buffer physical address/length in scsi_io_request.
1737 1747 */
1738 1748 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1739 1749 cmd->sense_phys_addr1);
1740 1750 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1741 1751 SENSE_LENGTH);
1742 1752
1743 1753 /* Construct SGL */
1744 1754 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1745 1755 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1746 1756
1747 1757 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1748 1758 scsi_raid_io, &datalen);
1749 1759
1750 1760 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1751 1761
1752 1762 break;
1753 1763 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1754 1764 } else {
1755 1765 break;
1756 1766 #endif
1757 1767 }
1758 1768 	/* fall through for all non-rd/wr cmds */
1759 1769 default:
1760 1770 switch (pkt->pkt_cdbp[0]) {
1761 1771 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1762 1772 return_raid_msg_pkt(instance, cmd);
1763 1773 *cmd_done = 1;
1764 1774 return (NULL);
1765 1775 }
1766 1776
1767 1777 case SCMD_MODE_SENSE:
1768 1778 case SCMD_MODE_SENSE_G1: {
1769 1779 union scsi_cdb *cdbp;
1770 1780 uint16_t page_code;
1771 1781
1772 1782 cdbp = (void *)pkt->pkt_cdbp;
1773 1783 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1774 1784 switch (page_code) {
1775 1785 case 0x3:
1776 1786 case 0x4:
1777 1787 (void) mrsas_mode_sense_build(pkt);
1778 1788 return_raid_msg_pkt(instance, cmd);
1779 1789 *cmd_done = 1;
1780 1790 return (NULL);
1781 1791 }
1782 1792 break;
1783 1793 }
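		/*
		 * MODE SENSE for pages 0x3 and 0x4 (the SCSI format and
		 * rigid disk geometry pages) is synthesized locally by
		 * mrsas_mode_sense_build() and the packet is completed
		 * immediately, without ever being sent to the firmware.
		 */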
1784 1794
1785 1795 default: {
1786 1796 /*
1787 1797 * Here we need to handle PASSTHRU for
1788 1798 			 * Logical Devices, e.g. Inquiry.
1789 1799 */
1790 1800
1791 1801 if (!(acmd->islogical)) {
1792 1802
1793 1803 /* Acquire SYNC MAP UPDATE lock */
1794 1804 mutex_enter(&instance->sync_map_mtx);
1795 1805
1796 1806 local_map_ptr =
1797 1807 instance->ld_map[(instance->map_id & 1)];
1798 1808
1799 1809 ddi_put8(acc_handle, &scsi_raid_io->Function,
1800 1810 MPI2_FUNCTION_SCSI_IO_REQUEST);
1801 1811
1802 1812 ReqDescUnion->SCSIIO.RequestFlags =
1803 1813 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1804 1814 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1805 1815
1806 1816 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1807 1817 local_map_ptr->raidMap.
1808 1818 devHndlInfo[acmd->device_id].curDevHdl);
1809 1819
1810 1820
1811 1821 			/* Set regLockFlags to REGION_TYPE_BYPASS */
1812 1822 ddi_put8(acc_handle,
1813 1823 &scsi_raid_io->RaidContext.regLockFlags, 0);
1814 1824 ddi_put64(acc_handle,
1815 1825 &scsi_raid_io->RaidContext.regLockRowLBA,
1816 1826 0);
1817 1827 ddi_put32(acc_handle,
1818 1828 &scsi_raid_io->RaidContext.regLockLength,
1819 1829 0);
1820 1830 ddi_put8(acc_handle,
1821 1831 &scsi_raid_io->RaidContext.RAIDFlags,
1822 1832 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1823 1833 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1824 1834 ddi_put16(acc_handle,
1825 1835 &scsi_raid_io->RaidContext.timeoutValue,
1826 1836 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1827 1837 ddi_put16(acc_handle,
1828 1838 &scsi_raid_io->RaidContext.ldTargetId,
1829 1839 acmd->device_id);
1830 1840 ddi_put8(acc_handle,
1831 1841 &scsi_raid_io->LUN[1], acmd->lun);
1832 1842
1833 1843 /* Release SYNC MAP UPDATE lock */
1834 1844 mutex_exit(&instance->sync_map_mtx);
1835 1845
1836 1846 } else {
1837 1847 ddi_put8(acc_handle, &scsi_raid_io->Function,
1838 1848 MPI2_FUNCTION_LD_IO_REQUEST);
1839 1849 ddi_put8(acc_handle,
1840 1850 &scsi_raid_io->LUN[1], acmd->lun);
1841 1851 ddi_put16(acc_handle,
1842 1852 &scsi_raid_io->DevHandle, acmd->device_id);
1843 1853 ReqDescUnion->SCSIIO.RequestFlags =
1844 1854 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1845 1855 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1846 1856 }
1847 1857
1848 1858 /*
1849 1859 * Set sense buffer physical address/length in
1850 1860 * scsi_io_request.
1851 1861 */
1852 1862 ddi_put32(acc_handle,
1853 1863 &scsi_raid_io->SenseBufferLowAddress,
1854 1864 cmd->sense_phys_addr1);
1855 1865 ddi_put8(acc_handle,
1856 1866 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1857 1867
1858 1868 /* Construct SGL */
1859 1869 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1860 1870 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1861 1871
1862 1872 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1863 1873 scsi_raid_io, &datalen);
1864 1874
1865 1875 ddi_put32(acc_handle,
1866 1876 &scsi_raid_io->DataLength, datalen);
1867 1877
1868 1878
1869 1879 con_log(CL_ANN, (CE_CONT,
1870 1880 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1871 1881 pkt->pkt_cdbp[0], acmd->device_id));
1872 1882 con_log(CL_DLEVEL1, (CE_CONT,
1873 1883 "data length = %x\n",
1874 1884 scsi_raid_io->DataLength));
1875 1885 con_log(CL_DLEVEL1, (CE_CONT,
1876 1886 "cdb length = %x\n",
1877 1887 acmd->cmd_cdblen));
1878 1888 }
1879 1889 break;
1880 1890 }
1881 1891
1882 1892 }
1883 1893
1884 1894 return (cmd);
1885 1895 }
1886 1896
1887 1897 uint32_t
1888 1898 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1889 1899 {
1890 1900 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1891 1901 }
1892 1902
1893 1903 void
1894 1904 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1895 1905 {
1896 1906 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1897 1907 atomic_inc_16(&instance->fw_outstanding);
1898 1908
1899 1909 struct scsi_pkt *pkt;
1900 1910
1901 1911 con_log(CL_ANN1,
1902 1912 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1903 1913
1904 1914 con_log(CL_DLEVEL1, (CE_CONT,
1905 1915 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1906 1916 con_log(CL_DLEVEL1, (CE_CONT,
1907 1917 " [req desc low part] %x \n",
1908 1918 (uint_t)(req_desc->Words & 0xffffffffff)));
1909 1919 con_log(CL_DLEVEL1, (CE_CONT,
1910 1920 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1911 1921 pkt = cmd->pkt;
1912 1922
1913 1923 if (pkt) {
1914 1924 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1915 1925 "ISSUED CMD TO FW : called : cmd:"
1916 1926 ": %p instance : %p pkt : %p pkt_time : %x\n",
1917 1927 gethrtime(), (void *)cmd, (void *)instance,
1918 1928 (void *)pkt, cmd->drv_pkt_time));
1919 1929 if (instance->adapterresetinprogress) {
1920 1930 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1921 1931 con_log(CL_ANN, (CE_NOTE,
1922 1932 "TBOLT Reset the scsi_pkt timer"));
1923 1933 } else {
1924 1934 push_pending_mfi_pkt(instance, cmd);
1925 1935 }
1926 1936
1927 1937 } else {
1928 1938 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1929 1939 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1930 1940 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1931 1941 }
1932 1942
1933 1943 /* Issue the command to the FW */
1934 1944 mutex_enter(&instance->reg_write_mtx);
1935 1945 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1936 1946 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1937 1947 mutex_exit(&instance->reg_write_mtx);
1938 1948 }
1939 1949
1940 1950 /*
1941 1951 * issue_cmd_in_sync_mode
1942 1952 */
1943 1953 int
1944 1954 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1945 1955 struct mrsas_cmd *cmd)
1946 1956 {
1947 1957 int i;
1948 1958 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1949 1959 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1950 1960
1951 1961 struct mrsas_header *hdr;
1952 1962 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1953 1963
1954 1964 con_log(CL_ANN,
1955 1965 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1956 1966 cmd->SMID));
1957 1967
1958 1968
1959 1969 if (instance->adapterresetinprogress) {
1960 1970 cmd->drv_pkt_time = ddi_get16
1961 1971 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1962 1972 if (cmd->drv_pkt_time < debug_timeout_g)
1963 1973 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1964 1974 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1965 1975 "RESET-IN-PROGRESS, issue cmd & return."));
1966 1976
1967 1977 mutex_enter(&instance->reg_write_mtx);
1968 1978 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1969 1979 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1970 1980 mutex_exit(&instance->reg_write_mtx);
1971 1981
1972 1982 return (DDI_SUCCESS);
1973 1983 } else {
1974 1984 con_log(CL_ANN1, (CE_NOTE,
1975 1985 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1976 1986 push_pending_mfi_pkt(instance, cmd);
1977 1987 }
1978 1988
1979 1989 con_log(CL_DLEVEL2, (CE_NOTE,
1980 1990 "HighQport offset :%p",
1981 1991 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1982 1992 con_log(CL_DLEVEL2, (CE_NOTE,
1983 1993 "LowQport offset :%p",
1984 1994 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1985 1995
1986 1996 cmd->sync_cmd = MRSAS_TRUE;
1987 1997 cmd->cmd_status = ENODATA;
1988 1998
1989 1999
1990 2000 mutex_enter(&instance->reg_write_mtx);
1991 2001 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1992 2002 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1993 2003 mutex_exit(&instance->reg_write_mtx);
1994 2004
1995 2005 con_log(CL_ANN1, (CE_NOTE,
1996 2006 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1997 2007 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1998 2008 (uint_t)(req_desc->Words & 0xffffffff)));
1999 2009
2000 2010 mutex_enter(&instance->int_cmd_mtx);
2001 2011 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2002 2012 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2003 2013 }
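	/*
	 * Note that the loop above counts cv_wait() wakeups rather than
	 * elapsed time; cv_wait() has no timeout, so the actual wait is
	 * bounded by the number of broadcasts received, not strictly by
	 * MFI_POLL_TIMEOUT_SECS.
	 */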
2004 2014 mutex_exit(&instance->int_cmd_mtx);
2005 2015
2006 2016
2007 2017 	if (i < (msecs - 1)) {
2008 2018 return (DDI_SUCCESS);
2009 2019 } else {
2010 2020 return (DDI_FAILURE);
2011 2021 }
2012 2022 }
2013 2023
2014 2024 /*
2015 2025 * issue_cmd_in_poll_mode
2016 2026 */
2017 2027 int
2018 2028 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2019 2029 struct mrsas_cmd *cmd)
2020 2030 {
2021 2031 int i;
2022 2032 uint16_t flags;
2023 2033 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2024 2034 struct mrsas_header *frame_hdr;
2025 2035
2026 2036 con_log(CL_ANN,
2027 2037 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2028 2038 cmd->SMID));
2029 2039
2030 2040 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2031 2041
2032 2042 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2033 2043 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2034 2044 MFI_CMD_STATUS_POLL_MODE);
2035 2045 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2036 2046 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2037 2047 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2038 2048
2039 2049 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2040 2050 (uint_t)(req_desc->Words & 0xffffffff)));
2041 2051 con_log(CL_ANN1, (CE_NOTE,
2042 2052 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2043 2053
2044 2054 /* issue the frame using inbound queue port */
2045 2055 mutex_enter(&instance->reg_write_mtx);
2046 2056 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2047 2057 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2048 2058 mutex_exit(&instance->reg_write_mtx);
2049 2059
2050 2060 for (i = 0; i < msecs && (
2051 2061 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2052 2062 == MFI_CMD_STATUS_POLL_MODE); i++) {
2053 2063 /* wait for cmd_status to change from 0xFF */
2054 2064 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2055 2065 }
2056 2066
2057 2067 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2058 2068 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2059 2069 con_log(CL_ANN1, (CE_NOTE,
2060 2070 " cmd failed %" PRIx64, (req_desc->Words)));
2061 2071 return (DDI_FAILURE);
2062 2072 }
2063 2073
2064 2074 return (DDI_SUCCESS);
2065 2075 }
2066 2076
2067 2077 void
2068 2078 tbolt_enable_intr(struct mrsas_instance *instance)
2069 2079 {
2070 2080 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2071 2081 /* writel(~0, ®s->outbound_intr_status); */
2072 2082 /* readl(®s->outbound_intr_status); */
2073 2083
2074 2084 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2075 2085
2076 2086 /* dummy read to force PCI flush */
2077 2087 (void) RD_OB_INTR_MASK(instance);
2078 2088
2079 2089 }
2080 2090
2081 2091 void
2082 2092 tbolt_disable_intr(struct mrsas_instance *instance)
2083 2093 {
2084 2094 uint32_t mask = 0xFFFFFFFF;
2085 2095
2086 2096 WR_OB_INTR_MASK(mask, instance);
2087 2097
2088 2098 /* Dummy readl to force pci flush */
2089 2099
2090 2100 (void) RD_OB_INTR_MASK(instance);
2091 2101 }
2092 2102
2093 2103
2094 2104 int
2095 2105 tbolt_intr_ack(struct mrsas_instance *instance)
2096 2106 {
2097 2107 uint32_t status;
2098 2108
2099 2109 /* check if it is our interrupt */
2100 2110 status = RD_OB_INTR_STATUS(instance);
2101 2111 con_log(CL_ANN1, (CE_NOTE,
2102 2112 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2103 2113
2104 2114 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2105 2115 return (DDI_INTR_UNCLAIMED);
2106 2116 }
2107 2117
2108 2118 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2109 2119 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2110 2120 return (DDI_INTR_UNCLAIMED);
2111 2121 }
2112 2122
2113 2123 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2114 2124 /* clear the interrupt by writing back the same value */
2115 2125 WR_OB_INTR_STATUS(status, instance);
2116 2126 /* dummy READ */
2117 2127 (void) RD_OB_INTR_STATUS(instance);
2118 2128 }
2119 2129 return (DDI_INTR_CLAIMED);
2120 2130 }
2121 2131
2122 2132 /*
2123 2133 * get_raid_msg_pkt : Get a command from the free pool
2124 2134 * After successful allocation, the caller of this routine
2125 2135 * must clear the frame buffer (memset to zero) before
2126 2136 * using the packet further.
2127 2137 *
2128 2138 * ***** Note *****
2129 2139 * After clearing the frame buffer the context id of the
2130 2140  * frame buffer SHOULD be restored.
2131 2141 */
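/*
 * Illustrative caller sketch (see mrsas_tbolt_get_ld_map_info() below for a
 * complete, real example of this pattern):
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	if (cmd == NULL)
 *		return (DDI_FAILURE);
 *	... fill in cmd->frame / build the MPI2 request ...
 *	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
 *	ret = instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
 *	return_raid_msg_pkt(instance, cmd);
 */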
2132 2142
2133 2143 struct mrsas_cmd *
2134 2144 get_raid_msg_pkt(struct mrsas_instance *instance)
2135 2145 {
2136 2146 mlist_t *head = &instance->cmd_pool_list;
2137 2147 struct mrsas_cmd *cmd = NULL;
2138 2148
2139 2149 mutex_enter(&instance->cmd_pool_mtx);
2140 2150 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2141 2151
2142 2152
2143 2153 if (!mlist_empty(head)) {
2144 2154 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2145 2155 mlist_del_init(head->next);
2146 2156 }
2147 2157 if (cmd != NULL) {
2148 2158 cmd->pkt = NULL;
2149 2159 cmd->retry_count_for_ocr = 0;
2150 2160 cmd->drv_pkt_time = 0;
2151 2161 }
2152 2162 mutex_exit(&instance->cmd_pool_mtx);
2153 2163
2154 2164 if (cmd != NULL)
2155 2165 bzero(cmd->scsi_io_request,
2156 2166 sizeof (Mpi2RaidSCSIIORequest_t));
2157 2167 return (cmd);
2158 2168 }
2159 2169
2160 2170 struct mrsas_cmd *
2161 2171 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2162 2172 {
2163 2173 mlist_t *head = &instance->cmd_app_pool_list;
2164 2174 struct mrsas_cmd *cmd = NULL;
2165 2175
2166 2176 mutex_enter(&instance->cmd_app_pool_mtx);
2167 2177 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2168 2178
2169 2179 if (!mlist_empty(head)) {
2170 2180 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2171 2181 mlist_del_init(head->next);
2172 2182 }
2173 2183 if (cmd != NULL) {
2174 2184 cmd->retry_count_for_ocr = 0;
2175 2185 cmd->drv_pkt_time = 0;
2176 2186 cmd->pkt = NULL;
2177 2187 cmd->request_desc = NULL;
2178 2188
2179 2189 }
2180 2190
2181 2191 mutex_exit(&instance->cmd_app_pool_mtx);
2182 2192
2183 2193 if (cmd != NULL) {
2184 2194 bzero(cmd->scsi_io_request,
2185 2195 sizeof (Mpi2RaidSCSIIORequest_t));
2186 2196 }
2187 2197
2188 2198 return (cmd);
2189 2199 }
2190 2200
2191 2201 /*
2192 2202 * return_raid_msg_pkt : Return a cmd to free command pool
2193 2203 */
2194 2204 void
2195 2205 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2196 2206 {
2197 2207 mutex_enter(&instance->cmd_pool_mtx);
2198 2208 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2199 2209
2200 2210
2201 2211 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2202 2212
2203 2213 mutex_exit(&instance->cmd_pool_mtx);
2204 2214 }
2205 2215
2206 2216 void
2207 2217 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2208 2218 {
2209 2219 mutex_enter(&instance->cmd_app_pool_mtx);
2210 2220 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2211 2221
2212 2222 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2213 2223
2214 2224 mutex_exit(&instance->cmd_app_pool_mtx);
2215 2225 }
2216 2226
2217 2227
2218 2228 void
2219 2229 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2220 2230 struct mrsas_cmd *cmd)
2221 2231 {
2222 2232 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2223 2233 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2224 2234 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2225 2235 uint32_t index;
2226 2236 ddi_acc_handle_t acc_handle =
2227 2237 instance->mpi2_frame_pool_dma_obj.acc_handle;
2228 2238
2229 2239 if (!instance->tbolt) {
2230 2240 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2231 2241 return;
2232 2242 }
2233 2243
2234 2244 index = cmd->index;
2235 2245
2236 2246 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2237 2247
2238 2248 if (!ReqDescUnion) {
2239 2249 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2240 2250 return;
2241 2251 }
2242 2252
2243 2253 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2244 2254
2245 2255 ReqDescUnion->Words = 0;
2246 2256
2247 2257 ReqDescUnion->SCSIIO.RequestFlags =
2248 2258 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2249 2259 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2250 2260
2251 2261 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2252 2262
2253 2263 cmd->request_desc = ReqDescUnion;
2254 2264
2255 2265 /* get raid message frame pointer */
2256 2266 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2257 2267
2258 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2268 + if ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
2269 + (instance->device_id == PCI_DEVICE_ID_LSI_FURY)) {
2259 2270 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2260 2271 &scsi_raid_io->SGL.IeeeChain;
2261 2272 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2262 2273 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2263 2274 }
2264 2275
2265 2276 ddi_put8(acc_handle, &scsi_raid_io->Function,
2266 2277 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2267 2278
2268 2279 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2269 2280 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2270 2281
2271 2282 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2272 2283 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2273 2284
2274 2285 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2275 2286 cmd->sense_phys_addr1);
2276 2287
2277 2288
2278 2289 scsi_raid_io_sgl_ieee =
2279 2290 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2280 2291
2281 2292 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2282 2293 (U64)cmd->frame_phys_addr);
2283 2294
2284 2295 ddi_put8(acc_handle,
2285 2296 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2286 2297 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2287 2298 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2288 2299 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2289 2300
2290 2301 con_log(CL_ANN1, (CE_NOTE,
2291 2302 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2292 2303 scsi_raid_io_sgl_ieee->Address));
2293 2304 con_log(CL_ANN1, (CE_NOTE,
2294 2305 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2295 2306 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2296 2307 scsi_raid_io_sgl_ieee->Flags));
2297 2308 }
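
/*
 * The MFI pass-through built above wraps a legacy MFI frame in an MPI2
 * request: a single IEEE chain SGE points at the MFI frame's physical
 * address, with the length hard-coded to 1024 bytes as noted in the comment
 * above, so the firmware can fetch and interpret the chained frame as an
 * ordinary MFI command.
 */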
2298 2309
2299 2310
2300 2311 void
2301 2312 tbolt_complete_cmd(struct mrsas_instance *instance,
2302 2313 struct mrsas_cmd *cmd)
2303 2314 {
2304 2315 uint8_t status;
2305 2316 uint8_t extStatus;
2306 2317 uint8_t arm;
2307 2318 struct scsa_cmd *acmd;
2308 2319 struct scsi_pkt *pkt;
2309 2320 struct scsi_arq_status *arqstat;
2310 2321 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2311 2322 LD_LOAD_BALANCE_INFO *lbinfo;
2312 2323 ddi_acc_handle_t acc_handle =
2313 2324 instance->mpi2_frame_pool_dma_obj.acc_handle;
2314 2325
2315 2326 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2316 2327
2317 2328 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2318 2329 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2319 2330
2320 2331 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2321 2332 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2322 2333
2323 2334 if (status != MFI_STAT_OK) {
2324 2335 con_log(CL_ANN, (CE_WARN,
2325 2336 "IO Cmd Failed SMID %x", cmd->SMID));
2326 2337 } else {
2327 2338 con_log(CL_ANN, (CE_NOTE,
2328 2339 "IO Cmd Success SMID %x", cmd->SMID));
2329 2340 }
2330 2341
2331 2342 /* regular commands */
2332 2343
2333 2344 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2334 2345
2335 2346 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2336 2347 acmd = (struct scsa_cmd *)cmd->cmd;
2337 2348 lbinfo = &instance->load_balance_info[acmd->device_id];
2338 2349
2339 2350 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2340 2351 arm = lbinfo->raid1DevHandle[0] ==
2341 2352 scsi_raid_io->DevHandle ? 0 : 1;
2342 2353
2343 2354 lbinfo->scsi_pending_cmds[arm]--;
2344 2355 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2345 2356 }
2346 2357 con_log(CL_DLEVEL3, (CE_NOTE,
2347 2358 "FastPath IO Completion Success "));
2348 2359 /* FALLTHRU */
2349 2360
2350 2361 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2351 2362 acmd = (struct scsa_cmd *)cmd->cmd;
2352 2363 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2353 2364
2354 2365 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2355 2366 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2356 2367 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2357 2368 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2358 2369 DDI_DMA_SYNC_FORCPU);
2359 2370 }
2360 2371 }
2361 2372
2362 2373 pkt->pkt_reason = CMD_CMPLT;
2363 2374 pkt->pkt_statistics = 0;
2364 2375 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2365 2376 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2366 2377
2367 2378 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2368 2379 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2369 2380 ((acmd->islogical) ? "LD" : "PD"),
2370 2381 acmd->cmd_dmacount, cmd->SMID, status));
2371 2382
2372 2383 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2373 2384 struct scsi_inquiry *inq;
2374 2385
2375 2386 if (acmd->cmd_dmacount != 0) {
2376 2387 bp_mapin(acmd->cmd_buf);
2377 2388 inq = (struct scsi_inquiry *)
2378 2389 acmd->cmd_buf->b_un.b_addr;
2379 2390
2380 2391 /* don't expose physical drives to OS */
2381 2392 if (acmd->islogical &&
2382 2393 (status == MFI_STAT_OK)) {
2383 2394 display_scsi_inquiry((caddr_t)inq);
2384 2395 #ifdef PDSUPPORT
2385 2396 } else if ((status == MFI_STAT_OK) &&
2386 2397 inq->inq_dtype == DTYPE_DIRECT) {
2387 2398 display_scsi_inquiry((caddr_t)inq);
2388 2399 #endif
2389 2400 } else {
2390 2401 /* for physical disk */
2391 2402 status = MFI_STAT_DEVICE_NOT_FOUND;
2392 2403 }
2393 2404 }
2394 2405 }
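		/*
		 * The INQUIRY post-processing above keeps physical drives
		 * hidden from the OS: unless PDSUPPORT is compiled in (and
		 * the device reports a direct-access type), a successful
		 * INQUIRY to a non-logical target has its status rewritten
		 * to MFI_STAT_DEVICE_NOT_FOUND so the disk is not enumerated.
		 */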
2395 2406
2396 2407 switch (status) {
2397 2408 case MFI_STAT_OK:
2398 2409 pkt->pkt_scbp[0] = STATUS_GOOD;
2399 2410 break;
2400 2411 case MFI_STAT_LD_CC_IN_PROGRESS:
2401 2412 case MFI_STAT_LD_RECON_IN_PROGRESS:
2402 2413 pkt->pkt_scbp[0] = STATUS_GOOD;
2403 2414 break;
2404 2415 case MFI_STAT_LD_INIT_IN_PROGRESS:
2405 2416 pkt->pkt_reason = CMD_TRAN_ERR;
2406 2417 break;
2407 2418 case MFI_STAT_SCSI_IO_FAILED:
2408 2419 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2409 2420 pkt->pkt_reason = CMD_TRAN_ERR;
2410 2421 break;
2411 2422 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 2423 con_log(CL_ANN, (CE_WARN,
2413 2424 "tbolt_complete_cmd: scsi_done with error"));
2414 2425
2415 2426 pkt->pkt_reason = CMD_CMPLT;
2416 2427 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2417 2428
2418 2429 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 2430 con_log(CL_ANN,
2420 2431 (CE_WARN, "TEST_UNIT_READY fail"));
2421 2432 } else {
2422 2433 pkt->pkt_state |= STATE_ARQ_DONE;
2423 2434 arqstat = (void *)(pkt->pkt_scbp);
2424 2435 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 2436 arqstat->sts_rqpkt_resid = 0;
2426 2437 arqstat->sts_rqpkt_state |=
2427 2438 STATE_GOT_BUS | STATE_GOT_TARGET
2428 2439 | STATE_SENT_CMD
2429 2440 | STATE_XFERRED_DATA;
2430 2441 *(uint8_t *)&arqstat->sts_rqpkt_status =
2431 2442 STATUS_GOOD;
2432 2443 con_log(CL_ANN1,
2433 2444 (CE_NOTE, "Copying Sense data %x",
2434 2445 cmd->SMID));
2435 2446
2436 2447 ddi_rep_get8(acc_handle,
2437 2448 (uint8_t *)&(arqstat->sts_sensedata),
2438 2449 cmd->sense1,
2439 2450 sizeof (struct scsi_extended_sense),
2440 2451 DDI_DEV_AUTOINCR);
2441 2452
2442 2453 }
2443 2454 break;
2444 2455 case MFI_STAT_LD_OFFLINE:
2445 2456 cmn_err(CE_WARN,
2446 2457 "tbolt_complete_cmd: ld offline "
2447 2458 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2448 2459 /* UNDO: */
2449 2460 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2450 2461
2451 2462 ddi_get16(acc_handle,
2452 2463 &scsi_raid_io->RaidContext.ldTargetId),
2453 2464
2454 2465 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2455 2466
2456 2467 pkt->pkt_reason = CMD_DEV_GONE;
2457 2468 pkt->pkt_statistics = STAT_DISCON;
2458 2469 break;
2459 2470 case MFI_STAT_DEVICE_NOT_FOUND:
2460 2471 con_log(CL_ANN, (CE_CONT,
2461 2472 "tbolt_complete_cmd: device not found error"));
2462 2473 pkt->pkt_reason = CMD_DEV_GONE;
2463 2474 pkt->pkt_statistics = STAT_DISCON;
2464 2475 break;
2465 2476
2466 2477 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 2478 pkt->pkt_state |= STATE_ARQ_DONE;
2468 2479 pkt->pkt_reason = CMD_CMPLT;
2469 2480 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2470 2481
2471 2482 arqstat = (void *)(pkt->pkt_scbp);
2472 2483 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 2484 arqstat->sts_rqpkt_resid = 0;
2474 2485 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 2486 | STATE_GOT_TARGET | STATE_SENT_CMD
2476 2487 | STATE_XFERRED_DATA;
2477 2488 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2478 2489
2479 2490 arqstat->sts_sensedata.es_valid = 1;
2480 2491 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 2492 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2482 2493
2483 2494 /*
2484 2495 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 2496 * ASC: 0x21h; ASCQ: 0x00h;
2486 2497 */
2487 2498 arqstat->sts_sensedata.es_add_code = 0x21;
2488 2499 arqstat->sts_sensedata.es_qual_code = 0x00;
2489 2500 break;
2490 2501 case MFI_STAT_INVALID_CMD:
2491 2502 case MFI_STAT_INVALID_DCMD:
2492 2503 case MFI_STAT_INVALID_PARAMETER:
2493 2504 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 2505 default:
2495 2506 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2496 2507 pkt->pkt_reason = CMD_TRAN_ERR;
2497 2508
2498 2509 break;
2499 2510 }
2500 2511
2501 2512 atomic_add_16(&instance->fw_outstanding, (-1));
2502 2513
2503 2514 (void) mrsas_common_check(instance, cmd);
2504 2515 if (acmd->cmd_dmahandle) {
2505 2516 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2506 2517 DDI_SUCCESS) {
2507 2518 ddi_fm_service_impact(instance->dip,
2508 2519 DDI_SERVICE_UNAFFECTED);
2509 2520 pkt->pkt_reason = CMD_TRAN_ERR;
2510 2521 pkt->pkt_statistics = 0;
2511 2522 }
2512 2523 }
2513 2524
2514 2525 /* Call the callback routine */
2515 2526 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2516 2527 (*pkt->pkt_comp)(pkt);
2517 2528
2518 2529 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2519 2530
2520 2531 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2521 2532
2522 2533 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2523 2534
2524 2535 return_raid_msg_pkt(instance, cmd);
2525 2536 break;
2526 2537 }
2527 2538 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2528 2539
2529 2540 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2530 2541 cmd->frame->dcmd.mbox.b[1] == 1) {
2531 2542
2532 2543 mutex_enter(&instance->sync_map_mtx);
2533 2544
2534 2545 con_log(CL_ANN, (CE_NOTE,
2535 2546 "LDMAP sync command SMID RECEIVED 0x%X",
2536 2547 cmd->SMID));
2537 2548 if (cmd->frame->hdr.cmd_status != 0) {
2538 2549 cmn_err(CE_WARN,
2539 2550 "map sync failed, status = 0x%x.",
2540 2551 cmd->frame->hdr.cmd_status);
2541 2552 } else {
2542 2553 instance->map_id++;
2543 2554 cmn_err(CE_NOTE,
2544 2555 "map sync received, switched map_id to %"
2545 2556 PRIu64 " \n", instance->map_id);
2546 2557 }
2547 2558
2548 2559 if (MR_ValidateMapInfo(instance->ld_map[
2549 2560 (instance->map_id & 1)],
2550 2561 instance->load_balance_info)) {
2551 2562 instance->fast_path_io = 1;
2552 2563 } else {
2553 2564 instance->fast_path_io = 0;
2554 2565 }
2555 2566
2556 2567 con_log(CL_ANN, (CE_NOTE,
2557 2568 "instance->fast_path_io %d",
2558 2569 instance->fast_path_io));
2559 2570
2560 2571 instance->unroll.syncCmd = 0;
2561 2572
2562 2573 if (instance->map_update_cmd == cmd) {
2563 2574 return_raid_msg_pkt(instance, cmd);
2564 2575 atomic_add_16(&instance->fw_outstanding, (-1));
2565 2576 (void) mrsas_tbolt_sync_map_info(instance);
2566 2577 }
2567 2578
2568 2579 cmn_err(CE_NOTE, "LDMAP sync completed.");
2569 2580 mutex_exit(&instance->sync_map_mtx);
2570 2581 break;
2571 2582 }
2572 2583
2573 2584 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2574 2585 con_log(CL_ANN1, (CE_CONT,
2575 2586 "AEN command SMID RECEIVED 0x%X",
2576 2587 cmd->SMID));
2577 2588 if ((instance->aen_cmd == cmd) &&
2578 2589 (instance->aen_cmd->abort_aen)) {
2579 2590 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2580 2591 "aborted_aen returned"));
2581 2592 } else {
2582 2593 atomic_add_16(&instance->fw_outstanding, (-1));
2583 2594 service_mfi_aen(instance, cmd);
2584 2595 }
2585 2596 }
2586 2597
2587 2598 if (cmd->sync_cmd == MRSAS_TRUE) {
2588 2599 con_log(CL_ANN1, (CE_CONT,
2589 2600 "Sync-mode Command Response SMID RECEIVED 0x%X",
2590 2601 cmd->SMID));
2591 2602
2592 2603 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2593 2604 } else {
2594 2605 con_log(CL_ANN, (CE_CONT,
2595 2606 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2596 2607 cmd->SMID));
2597 2608 }
2598 2609 break;
2599 2610 default:
2600 2611 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2601 2612 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2602 2613
2603 2614 /* free message */
2604 2615 con_log(CL_ANN,
2605 2616 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2606 2617 break;
2607 2618 }
2608 2619 }
2609 2620
2610 2621 uint_t
2611 2622 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2612 2623 {
2613 2624 uint8_t replyType;
2614 2625 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2615 2626 Mpi2ReplyDescriptorsUnion_t *desc;
2616 2627 uint16_t smid;
2617 2628 union desc_value d_val;
2618 2629 struct mrsas_cmd *cmd;
2619 2630
2620 2631 struct mrsas_header *hdr;
2621 2632 struct scsi_pkt *pkt;
2622 2633
2623 2634 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2624 2635 0, 0, DDI_DMA_SYNC_FORDEV);
2625 2636
2626 2637 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2627 2638 0, 0, DDI_DMA_SYNC_FORCPU);
2628 2639
2629 2640 desc = instance->reply_frame_pool;
2630 2641 desc += instance->reply_read_index;
2631 2642
2632 2643 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2633 2644 replyType = replyDesc->ReplyFlags &
2634 2645 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2635 2646
2636 2647 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2637 2648 return (DDI_INTR_UNCLAIMED);
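	/*
	 * Reply descriptors are initialized to all 1s (and reset to all 1s
	 * again after being consumed below), so a descriptor whose type
	 * reads as UNUSED, or whose Words are 0xffffffff:0xffffffff, means
	 * there is no new completion at the current read index.
	 */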
2638 2649
2639 2650 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2640 2651 != DDI_SUCCESS) {
2641 2652 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2642 2653 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2643 2654 con_log(CL_ANN1,
2644 2655 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2645 2656 "FMA check, returning DDI_INTR_UNCLAIMED"));
2646 2657 return (DDI_INTR_CLAIMED);
2647 2658 }
2648 2659
2649 2660 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2650 2661 (void *)desc, desc->Words));
2651 2662
2652 2663 d_val.word = desc->Words;
2653 2664
2654 2665
2655 2666 /* Read Reply descriptor */
2656 2667 while ((d_val.u1.low != 0xffffffff) &&
2657 2668 (d_val.u1.high != 0xffffffff)) {
2658 2669
2659 2670 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2660 2671 0, 0, DDI_DMA_SYNC_FORCPU);
2661 2672
2662 2673 smid = replyDesc->SMID;
2663 2674
2664 2675 if (!smid || smid > instance->max_fw_cmds + 1) {
2665 2676 con_log(CL_ANN1, (CE_NOTE,
2666 2677 "Reply Desc at Break = %p Words = %" PRIx64,
2667 2678 (void *)desc, desc->Words));
2668 2679 break;
2669 2680 }
2670 2681
2671 2682 cmd = instance->cmd_list[smid - 1];
2672 2683 if (!cmd) {
2673 2684 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2674 2685 "outstanding_cmd: Invalid command "
2675 2686 			    " or Poll command received in completion path"));
2676 2687 } else {
2677 2688 mutex_enter(&instance->cmd_pend_mtx);
2678 2689 if (cmd->sync_cmd == MRSAS_TRUE) {
2679 2690 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2680 2691 if (hdr) {
2681 2692 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2682 2693 "tbolt_process_outstanding_cmd:"
2683 2694 " mlist_del_init(&cmd->list)."));
2684 2695 mlist_del_init(&cmd->list);
2685 2696 }
2686 2697 } else {
2687 2698 pkt = cmd->pkt;
2688 2699 if (pkt) {
2689 2700 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2690 2701 "tbolt_process_outstanding_cmd:"
2691 2702 "mlist_del_init(&cmd->list)."));
2692 2703 mlist_del_init(&cmd->list);
2693 2704 }
2694 2705 }
2695 2706
2696 2707 mutex_exit(&instance->cmd_pend_mtx);
2697 2708
2698 2709 tbolt_complete_cmd(instance, cmd);
2699 2710 }
2700 2711 /* set it back to all 1s. */
2701 2712 desc->Words = -1LL;
2702 2713
2703 2714 instance->reply_read_index++;
2704 2715
2705 2716 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2706 2717 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2707 2718 instance->reply_read_index = 0;
2708 2719 }
2709 2720
2710 2721 /* Get the next reply descriptor */
2711 2722 if (!instance->reply_read_index)
2712 2723 desc = instance->reply_frame_pool;
2713 2724 else
2714 2725 desc++;
2715 2726
2716 2727 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2717 2728
2718 2729 d_val.word = desc->Words;
2719 2730
2720 2731 con_log(CL_ANN1, (CE_NOTE,
2721 2732 "Next Reply Desc = %p Words = %" PRIx64,
2722 2733 (void *)desc, desc->Words));
2723 2734
2724 2735 replyType = replyDesc->ReplyFlags &
2725 2736 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2726 2737
2727 2738 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2728 2739 break;
2729 2740
2730 2741 } /* End of while loop. */
2731 2742
2732 2743 /* update replyIndex to FW */
2733 2744 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2734 2745
2735 2746
2736 2747 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2737 2748 0, 0, DDI_DMA_SYNC_FORDEV);
2738 2749
2739 2750 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2740 2751 0, 0, DDI_DMA_SYNC_FORCPU);
2741 2752 return (DDI_INTR_CLAIMED);
2742 2753 }
2743 2754
2744 2755
2745 2756
2746 2757
2747 2758 /*
2748 2759  * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2749 2760  * @instance: Adapter soft state
2750 2761  * @cmd: Command to be completed
2751 2762  *
2752 2763  * tbolt_issue_cmd_in_sync_mode() waits on a condition variable for the
2753 2764  * command it issued to complete.  This function wakes that waiting
2754 2765  * routine by broadcasting on the condition variable.
2755 2766 */
2756 2767 void
2757 2768 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2758 2769 struct mrsas_cmd *cmd)
2759 2770 {
2760 2771
2761 2772 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2762 2773 &cmd->frame->io.cmd_status);
2763 2774
2764 2775 cmd->sync_cmd = MRSAS_FALSE;
2765 2776
2766 2777 mutex_enter(&instance->int_cmd_mtx);
2767 2778 if (cmd->cmd_status == ENODATA) {
2768 2779 cmd->cmd_status = 0;
2769 2780 }
2770 2781 cv_broadcast(&instance->int_cmd_cv);
2771 2782 mutex_exit(&instance->int_cmd_mtx);
2772 2783
2773 2784 }
2774 2785
2775 2786 /*
2776 2787 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2777 2788 * instance: Adapter soft state
2778 2789 *
2779 2790  * Issues an internal command (DCMD) to fetch the FW's logical drive
2780 2791  * (RAID) map.  The map is later validated to decide whether fast-path
2781 2792  * I/O and load balancing can be used.
2782 2793 */
2783 2794 int
2784 2795 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2785 2796 {
2786 2797 int ret = 0;
2787 2798 struct mrsas_cmd *cmd = NULL;
2788 2799 struct mrsas_dcmd_frame *dcmd;
2789 2800 MR_FW_RAID_MAP_ALL *ci;
2790 2801 uint32_t ci_h = 0;
2791 2802 U32 size_map_info;
2792 2803
2793 2804 cmd = get_raid_msg_pkt(instance);
2794 2805
2795 2806 if (cmd == NULL) {
2796 2807 cmn_err(CE_WARN,
2797 2808 "Failed to get a cmd from free-pool in get_ld_map_info()");
2798 2809 return (DDI_FAILURE);
2799 2810 }
2800 2811
2801 2812 dcmd = &cmd->frame->dcmd;
2802 2813
2803 2814 size_map_info = sizeof (MR_FW_RAID_MAP) +
2804 2815 (sizeof (MR_LD_SPAN_MAP) *
2805 2816 (MAX_LOGICAL_DRIVES - 1));
2806 2817
2807 2818 con_log(CL_ANN, (CE_NOTE,
2808 2819 "size_map_info : 0x%x", size_map_info));
2809 2820
2810 2821 ci = instance->ld_map[(instance->map_id & 1)];
2811 2822 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2812 2823
2813 2824 if (!ci) {
2814 2825 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2815 2826 return_raid_msg_pkt(instance, cmd);
2816 2827 return (-1);
2817 2828 }
2818 2829
2819 2830 bzero(ci, sizeof (*ci));
2820 2831 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2821 2832
2822 2833 dcmd->cmd = MFI_CMD_OP_DCMD;
2823 2834 dcmd->cmd_status = 0xFF;
2824 2835 dcmd->sge_count = 1;
2825 2836 dcmd->flags = MFI_FRAME_DIR_READ;
2826 2837 dcmd->timeout = 0;
2827 2838 dcmd->pad_0 = 0;
2828 2839 dcmd->data_xfer_len = size_map_info;
2829 2840 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2830 2841 dcmd->sgl.sge32[0].phys_addr = ci_h;
2831 2842 dcmd->sgl.sge32[0].length = size_map_info;
2832 2843
2833 2844
2834 2845 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2835 2846
2836 2847 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2837 2848 ret = 0;
2838 2849 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2839 2850 } else {
2840 2851 cmn_err(CE_WARN, "Get LD Map Info failed");
2841 2852 ret = -1;
2842 2853 }
2843 2854
2844 2855 return_raid_msg_pkt(instance, cmd);
2845 2856
2846 2857 return (ret);
2847 2858 }
2848 2859
2849 2860 void
2850 2861 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2851 2862 {
2852 2863 uint32_t i;
2853 2864 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2854 2865 union desc_value d_val;
2855 2866
2856 2867 reply_desc = instance->reply_frame_pool;
2857 2868
2858 2869 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2859 2870 d_val.word = reply_desc->Words;
2860 2871 con_log(CL_DLEVEL3, (CE_NOTE,
2861 2872 "i=%d, %x:%x",
2862 2873 i, d_val.u1.high, d_val.u1.low));
2863 2874 }
2864 2875 }
2865 2876
2866 2877 /*
2867 2878  * mrsas_tbolt_prepare_cdb - Prepare a 32-byte CDB for fast path.
2868 2879  * @io_info: MegaRAID IO request packet pointer.
2869 2880  * @ref_tag: Reference tag for RD/WRPROTECT
2870 2881  *
2871 2882  * Builds the variable-length CDB and EEDP fields for a DIF-capable disk.
2872 2883 */
2873 2884 void
2874 2885 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2875 2886 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2876 2887 U32 ref_tag)
2877 2888 {
2878 2889 uint16_t EEDPFlags;
2879 2890 uint32_t Control;
2880 2891 ddi_acc_handle_t acc_handle =
2881 2892 instance->mpi2_frame_pool_dma_obj.acc_handle;
2882 2893
2883 2894 /* Prepare 32-byte CDB if DIF is supported on this device */
2884 2895 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2885 2896
2886 2897 bzero(cdb, 32);
2887 2898
2888 2899 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2889 2900
2890 2901
2891 2902 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2892 2903
2893 2904 if (io_info->isRead)
2894 2905 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2895 2906 else
2896 2907 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2897 2908
2898 2909 	/* Verify against the Linux driver; it sets MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2899 2910 cdb[10] = MRSAS_RD_WR_PROTECT;
2900 2911
2901 2912 /* LOGICAL BLOCK ADDRESS */
2902 2913 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2903 2914 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2904 2915 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2905 2916 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2906 2917 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2907 2918 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2908 2919 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2909 2920 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2910 2921
2911 2922 /* Logical block reference tag */
2912 2923 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2913 2924 BE_32(ref_tag));
2914 2925
2915 2926 ddi_put16(acc_handle,
2916 2927 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2917 2928
2918 2929 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2919 2930 ((io_info->numBlocks)*512));
2920 2931 /* Specify 32-byte cdb */
2921 2932 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2922 2933
2923 2934 /* Transfer length */
2924 2935 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2925 2936 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2926 2937 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2927 2938 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2928 2939
2929 2940 /* set SCSI IO EEDPFlags */
2930 2941 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2931 2942 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2932 2943
2933 2944 /* set SCSI IO EEDPFlags bits */
2934 2945 if (io_info->isRead) {
2935 2946 /*
2936 2947 * For READ commands, the EEDPFlags shall be set to specify to
2937 2948 * Increment the Primary Reference Tag, to Check the Reference
2938 2949 * Tag, and to Check and Remove the Protection Information
2939 2950 * fields.
2940 2951 */
2941 2952 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2942 2953 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2943 2954 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2944 2955 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2945 2956 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2946 2957 } else {
2947 2958 /*
2948 2959 * For WRITE commands, the EEDPFlags shall be set to specify to
2949 2960 * Increment the Primary Reference Tag, and to Insert
2950 2961 * Protection Information fields.
2951 2962 */
2952 2963 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2953 2964 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2954 2965 }
2955 2966 Control |= (0x4 << 26);
2956 2967
2957 2968 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2958 2969 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2959 2970 ddi_put32(acc_handle,
2960 2971 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2961 2972 }
2962 2973
2963 2974
2964 2975 /*
2965 2976 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2966 2977 * @cdb: CDB
2967 2978 * @cdb_len: cdb length
2968 2979 * @start_blk: Start block of IO
2969 2980 *
2970 2981 * Used to set the PD LBA in CDB for FP IOs
2971 2982 */
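/*
 * A minimal worked example of the 16-to-10 byte conversion below: a READ(16)
 * for 8 blocks at LBA 0x1000 (within 32-bit range) is rewritten as a READ(10)
 * whose bytes 2-5 become 00 00 10 00 and bytes 7-8 become 00 08, with the
 * flag, group number and control bytes carried over.  Note also that the
 * bzero(cdb, sizeof (cdb)) calls below operate on an array parameter, so
 * sizeof (cdb) evaluates to the size of a pointer rather than 32; only that
 * many bytes are actually cleared, and the explicit byte assignments that
 * follow are what really rebuild the CDB.
 */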
2972 2983 static void
2973 2984 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2974 2985 U32 num_blocks)
2975 2986 {
2976 2987 U8 cdb_len = *cdb_len_ptr;
2977 2988 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2978 2989
2979 2990 	/* Some drives don't support 16/12 byte CDBs; convert to 10 */
2980 2991 if (((cdb_len == 12) || (cdb_len == 16)) &&
2981 2992 (start_blk <= 0xffffffff)) {
2982 2993 if (cdb_len == 16) {
2983 2994 con_log(CL_ANN,
2984 2995 (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2985 2996 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2986 2997 flagvals = cdb[1];
2987 2998 groupnum = cdb[14];
2988 2999 control = cdb[15];
2989 3000 } else {
2990 3001 con_log(CL_ANN,
2991 3002 (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
2992 3003 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2993 3004 flagvals = cdb[1];
2994 3005 groupnum = cdb[10];
2995 3006 control = cdb[11];
2996 3007 }
2997 3008
2998 3009 bzero(cdb, sizeof (cdb));
2999 3010
3000 3011 cdb[0] = opcode;
3001 3012 cdb[1] = flagvals;
3002 3013 cdb[6] = groupnum;
3003 3014 cdb[9] = control;
3004 3015 /* Set transfer length */
3005 3016 cdb[8] = (U8)(num_blocks & 0xff);
3006 3017 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3007 3018 cdb_len = 10;
3008 3019 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3009 3020 /* Convert to 16 byte CDB for large LBA's */
3010 3021 con_log(CL_ANN,
3011 3022 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3012 3023 switch (cdb_len) {
3013 3024 case 6:
3014 3025 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3015 3026 control = cdb[5];
3016 3027 break;
3017 3028 case 10:
3018 3029 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3019 3030 flagvals = cdb[1];
3020 3031 groupnum = cdb[6];
3021 3032 control = cdb[9];
3022 3033 break;
3023 3034 case 12:
3024 3035 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3025 3036 flagvals = cdb[1];
3026 3037 groupnum = cdb[10];
3027 3038 control = cdb[11];
3028 3039 break;
3029 3040 }
3030 3041
3031 3042 bzero(cdb, sizeof (cdb));
3032 3043
3033 3044 cdb[0] = opcode;
3034 3045 cdb[1] = flagvals;
3035 3046 cdb[14] = groupnum;
3036 3047 cdb[15] = control;
3037 3048
3038 3049 /* Transfer length */
3039 3050 cdb[13] = (U8)(num_blocks & 0xff);
3040 3051 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3041 3052 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3042 3053 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3043 3054
3044 3055 /* Specify 16-byte cdb */
3045 3056 cdb_len = 16;
3046 3057 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3047 3058 /* convert to 10 byte CDB */
3048 3059 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3049 3060 control = cdb[5];
3050 3061
3051 3062 bzero(cdb, sizeof (cdb));
3052 3063 cdb[0] = opcode;
3053 3064 cdb[9] = control;
3054 3065
3055 3066 /* Set transfer length */
3056 3067 cdb[8] = (U8)(num_blocks & 0xff);
3057 3068 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3058 3069
3059 3070 /* Specify 10-byte cdb */
3060 3071 cdb_len = 10;
3061 3072 }
3062 3073
3063 3074
3064 3075 /* Fall through Normal case, just load LBA here */
3065 3076 switch (cdb_len) {
3066 3077 case 6:
3067 3078 {
3068 3079 U8 val = cdb[1] & 0xE0;
3069 3080 cdb[3] = (U8)(start_blk & 0xff);
3070 3081 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3071 3082 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3072 3083 break;
3073 3084 }
3074 3085 case 10:
3075 3086 cdb[5] = (U8)(start_blk & 0xff);
3076 3087 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3077 3088 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3078 3089 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3079 3090 break;
3080 3091 case 12:
3081 3092 cdb[5] = (U8)(start_blk & 0xff);
3082 3093 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3083 3094 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3084 3095 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3085 3096 break;
3086 3097
3087 3098 case 16:
3088 3099 cdb[9] = (U8)(start_blk & 0xff);
3089 3100 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3090 3101 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3091 3102 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3092 3103 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3093 3104 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3094 3105 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3095 3106 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3096 3107 break;
3097 3108 }
3098 3109
3099 3110 *cdb_len_ptr = cdb_len;
3100 3111 }
3101 3112
3102 3113
3103 3114 static int
3104 3115 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3105 3116 {
3106 3117 MR_FW_RAID_MAP_ALL *ld_map;
3107 3118
3108 3119 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3109 3120
3110 3121 ld_map = instance->ld_map[(instance->map_id & 1)];
3111 3122
3112 3123 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3113 3124 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3114 3125
3115 3126 if (MR_ValidateMapInfo(instance->ld_map[
3116 3127 (instance->map_id & 1)], instance->load_balance_info)) {
3117 3128 con_log(CL_ANN,
3118 3129 (CE_CONT, "MR_ValidateMapInfo success"));
3119 3130
3120 3131 instance->fast_path_io = 1;
3121 3132 con_log(CL_ANN,
3122 3133 (CE_NOTE, "instance->fast_path_io %d",
3123 3134 instance->fast_path_io));
3124 3135
3125 3136 return (DDI_SUCCESS);
3126 3137 }
3127 3138
3128 3139 }
3129 3140
3130 3141 instance->fast_path_io = 0;
3131 3142 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3132 3143 con_log(CL_ANN, (CE_NOTE,
3133 3144 "instance->fast_path_io %d", instance->fast_path_io));
3134 3145
3135 3146 return (DDI_FAILURE);
3136 3147 }
3137 3148
3138 3149 /*
3139 3150 * Marks HBA as bad. This will be called either when an
3140 3151  * IO packet times out even after 3 FW resets, or the FW is found
3141 3152  * to be faulted even after 3 consecutive resets.
3142 3153 */
3143 3154
3144 3155 void
3145 3156 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3146 3157 {
3147 3158 cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3148 3159
3149 3160 if (instance->deadadapter == 1)
3150 3161 return;
3151 3162
3152 3163 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3153 3164 "Writing to doorbell with MFI_STOP_ADP "));
3154 3165 mutex_enter(&instance->ocr_flags_mtx);
3155 3166 instance->deadadapter = 1;
3156 3167 mutex_exit(&instance->ocr_flags_mtx);
3157 3168 instance->func_ptr->disable_intr(instance);
3158 3169 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3159 3170 /* Flush */
3160 3171 (void) RD_RESERVED0_REGISTER(instance);
3161 3172
3162 3173 (void) mrsas_print_pending_cmds(instance);
3163 3174 (void) mrsas_complete_pending_cmds(instance);
3164 3175 }
3165 3176
3166 3177 void
3167 3178 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3168 3179 {
3169 3180 int i;
3170 3181 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3171 3182 instance->reply_read_index = 0;
3172 3183
3173 3184 /* initializing reply address to 0xFFFFFFFF */
3174 3185 reply_desc = instance->reply_frame_pool;
3175 3186
3176 3187 for (i = 0; i < instance->reply_q_depth; i++) {
3177 3188 reply_desc->Words = (uint64_t)~0;
3178 3189 reply_desc++;
3179 3190 }
3180 3191 }
3181 3192
3182 3193 int
3183 3194 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3184 3195 {
3185 3196 uint32_t status = 0x00;
3186 3197 uint32_t retry = 0;
3187 3198 uint32_t cur_abs_reg_val;
3188 3199 uint32_t fw_state;
3189 3200 uint32_t abs_state;
3190 3201 uint32_t i;
3191 3202
3192 3203 con_log(CL_ANN, (CE_NOTE,
3193 3204 "mrsas_tbolt_reset_ppc entered"));
3194 3205
3195 3206 if (instance->deadadapter == 1) {
3196 3207 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3197 3208 "no more resets as HBA has been marked dead ");
3198 3209 return (DDI_FAILURE);
3199 3210 }
3200 3211
3201 3212 mutex_enter(&instance->ocr_flags_mtx);
3202 3213 instance->adapterresetinprogress = 1;
3203 3214 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3204 3215 	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3205 3216 mutex_exit(&instance->ocr_flags_mtx);
3206 3217
3207 3218 instance->func_ptr->disable_intr(instance);
3208 3219
3209 3220 	/* Add a delay in order to complete the in-flight ioctl and I/O cmds. */
3210 3221 for (i = 0; i < 3000; i++) {
3211 3222 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3212 3223 }
3213 3224
3214 3225 instance->reply_read_index = 0;
3215 3226
3216 3227 retry_reset:
3217 3228 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3218 3229 	    "Resetting TBOLT"));
3219 3230
3220 3231 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3221 3232 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3222 3233 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3223 3234 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3224 3235 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3225 3236 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3226 3237 con_log(CL_ANN1, (CE_NOTE,
3227 3238 "mrsas_tbolt_reset_ppc: magic number written "
3228 3239 "to write sequence register"));
3229 3240 delay(100 * drv_usectohz(MILLISEC));
3230 3241 status = RD_TBOLT_HOST_DIAG(instance);
3231 3242 con_log(CL_ANN1, (CE_NOTE,
3232 3243 	    "mrsas_tbolt_reset_ppc: host diag register "
3233 3244 	    "read successfully"));
3234 3245
3235 3246 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3236 3247 delay(100 * drv_usectohz(MILLISEC));
3237 3248 status = RD_TBOLT_HOST_DIAG(instance);
3238 3249 if (retry++ == 100) {
3239 3250 cmn_err(CE_WARN,
3240 3251 			    "mrsas_tbolt_reset_ppc: "
3241 3252 			    "reset adapter bit is still set; "
3242 3253 			    "check retry count %d", retry);
3243 3254 return (DDI_FAILURE);
3244 3255 }
3245 3256 }
3246 3257
3247 3258 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3248 3259 delay(100 * drv_usectohz(MILLISEC));
3249 3260
3250 3261 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3251 3262 (uint8_t *)((uintptr_t)(instance)->regmap +
3252 3263 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3253 3264
3254 3265 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3255 3266 delay(100 * drv_usectohz(MILLISEC));
3256 3267 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3257 3268 (uint8_t *)((uintptr_t)(instance)->regmap +
3258 3269 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3259 3270 if (retry++ == 100) {
3260 3271 			/* Don't call kill adapter here. */
3261 3272 			/* The RESET ADAPTER bit is cleared by firmware. */
3262 3273 /* mrsas_tbolt_kill_adapter(instance); */
3263 3274 cmn_err(CE_WARN,
3264 3275 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3265 3276 instance->instance, __func__);
3266 3277 return (DDI_FAILURE);
3267 3278 }
3268 3279 }
3269 3280
3270 3281 con_log(CL_ANN,
3271 3282 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3272 3283 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3273 3284 "Calling mfi_state_transition_to_ready"));
3274 3285
3275 3286 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 3287 retry = 0;
3277 3288 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3278 3289 delay(100 * drv_usectohz(MILLISEC));
3279 3290 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3280 3291 }
3281 3292 if (abs_state <= MFI_STATE_FW_INIT) {
3282 3293 cmn_err(CE_WARN,
3283 3294 		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
3284 3295 "state = 0x%x, RETRY RESET.", abs_state);
3285 3296 goto retry_reset;
3286 3297 }
3287 3298
3288 3299 	/* Mark the HBA as bad if the FW is still faulted after 3 consecutive resets. */
3289 3300 if (mfi_state_transition_to_ready(instance) ||
3290 3301 debug_tbolt_fw_faults_after_ocr_g == 1) {
3291 3302 cur_abs_reg_val =
3292 3303 instance->func_ptr->read_fw_status_reg(instance);
3293 3304 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3294 3305
3295 3306 con_log(CL_ANN1, (CE_NOTE,
3296 3307 		    "mrsas_tbolt_reset_ppc: before fault injection: FW is not ready "
3297 3308 "FW state = 0x%x", fw_state));
3298 3309 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3299 3310 fw_state = MFI_STATE_FAULT;
3300 3311
3301 3312 con_log(CL_ANN,
3302 3313 		    (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready "
3303 3314 "FW state = 0x%x", fw_state));
3304 3315
3305 3316 if (fw_state == MFI_STATE_FAULT) {
3306 3317 /* increment the count */
3307 3318 instance->fw_fault_count_after_ocr++;
3308 3319 if (instance->fw_fault_count_after_ocr
3309 3320 < MAX_FW_RESET_COUNT) {
3310 3321 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3311 3322 "FW is in fault after OCR count %d "
3312 3323 "Retry Reset",
3313 3324 instance->fw_fault_count_after_ocr);
3314 3325 goto retry_reset;
3315 3326
3316 3327 } else {
3317 3328 				cmn_err(CE_WARN, "mrsas %d: %s: "
3318 3329 				    "Max reset count exceeded (>%d); "
3319 3330 				    "marking HBA as bad, killing adapter",
3320 3331 instance->instance, __func__,
3321 3332 MAX_FW_RESET_COUNT);
3322 3333
3323 3334 mrsas_tbolt_kill_adapter(instance);
3324 3335 return (DDI_FAILURE);
3325 3336 }
3326 3337 }
3327 3338 }
3328 3339
3329 3340 /* reset the counter as FW is up after OCR */
3330 3341 instance->fw_fault_count_after_ocr = 0;
3331 3342
3332 3343 mrsas_reset_reply_desc(instance);
3333 3344
3334 3345
3335 3346 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3336 3347 "Calling mrsas_issue_init_mpi2"));
3337 3348 abs_state = mrsas_issue_init_mpi2(instance);
3338 3349 if (abs_state == (uint32_t)DDI_FAILURE) {
3339 3350 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3340 3351 "INIT failed Retrying Reset");
3341 3352 goto retry_reset;
3342 3353 }
3343 3354 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3344 3355 "mrsas_issue_init_mpi2 Done"));
3345 3356
3346 3357 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3347 3358 "Calling mrsas_print_pending_cmd"));
3348 3359 (void) mrsas_print_pending_cmds(instance);
3349 3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 3361 	    "mrsas_print_pending_cmds done"));
3351 3362
3352 3363 instance->func_ptr->enable_intr(instance);
3353 3364 instance->fw_outstanding = 0;
3354 3365
3355 3366 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3356 3367 "Calling mrsas_issue_pending_cmds"));
3357 3368 (void) mrsas_issue_pending_cmds(instance);
3358 3369 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3359 3370 "issue_pending_cmds done."));
3360 3371
3361 3372 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3362 3373 "Calling aen registration"));
3363 3374
3364 3375 instance->aen_cmd->retry_count_for_ocr = 0;
3365 3376 instance->aen_cmd->drv_pkt_time = 0;
3366 3377
3367 3378 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3368 3379
3369 3380 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag."));
3370 3381 mutex_enter(&instance->ocr_flags_mtx);
3371 3382 instance->adapterresetinprogress = 0;
3372 3383 mutex_exit(&instance->ocr_flags_mtx);
3373 3384 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3374 3385 	    "adapterresetinprogress flag unset"));
3375 3386
3376 3387 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3377 3388 return (DDI_SUCCESS);
3378 3389
3379 3390 }
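
mrsas_tbolt_reset_ppc() repeatedly uses the same bounded poll-with-delay idiom: read a register, sleep roughly 100ms, and give up (or retry the whole reset) after a fixed retry count. A minimal standalone sketch of that idiom, with check() and usleep() standing in for the driver's register reads and delay(9F)/drv_usecwait(9F) calls:

#include <stdint.h>
#include <unistd.h>

/* Poll check() until it succeeds or max_retries expires; return 0 on success. */
static int
poll_until(int (*check)(void), uint32_t max_retries)
{
	uint32_t retry;

	for (retry = 0; retry < max_retries; retry++) {
		if (check())
			return (0);
		(void) usleep(100 * 1000);	/* ~100ms between polls */
	}
	return (-1);		/* caller decides: retry the reset or fail */
}

static int
always_ready(void)
{
	return (1);
}

int
main(void)
{
	return (poll_until(always_ready, 100) == 0 ? 0 : 1);
}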
3380 3391
3381 3392
3382 3393 /*
3383 3394  * mrsas_tbolt_sync_map_info - Keep the FW's LD map in sync with the driver
3384 3395  * @instance: Adapter soft state
3385 3396  *
3386 3397  * Issues a pended internal command (DCMD) carrying the per-LD target ID
3387 3398  * and sequence number list, so that the FW can complete it and notify
3388 3399  * the driver whenever the RAID map changes.
3389 3400 */
3390 3401
3391 3402 static int
3392 3403 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3393 3404 {
3394 3405 int ret = 0, i;
3395 3406 struct mrsas_cmd *cmd = NULL;
3396 3407 struct mrsas_dcmd_frame *dcmd;
3397 3408 uint32_t size_sync_info, num_lds;
3398 3409 LD_TARGET_SYNC *ci = NULL;
3399 3410 MR_FW_RAID_MAP_ALL *map;
3400 3411 MR_LD_RAID *raid;
3401 3412 LD_TARGET_SYNC *ld_sync;
3402 3413 uint32_t ci_h = 0;
3403 3414 uint32_t size_map_info;
3404 3415
3405 3416 cmd = get_raid_msg_pkt(instance);
3406 3417
3407 3418 if (cmd == NULL) {
3408 3419 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3409 3420 "mrsas_tbolt_sync_map_info(). ");
3410 3421 return (DDI_FAILURE);
3411 3422 }
3412 3423
3413 3424 /* Clear the frame buffer and assign back the context id */
3414 3425 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3415 3426 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3416 3427 cmd->index);
3417 3428 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3418 3429
3419 3430
3420 3431 map = instance->ld_map[instance->map_id & 1];
3421 3432
3422 3433 num_lds = map->raidMap.ldCount;
3423 3434
3424 3435 dcmd = &cmd->frame->dcmd;
3425 3436
3426 3437 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3427 3438
3428 3439 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3429 3440 size_sync_info, num_lds));
3430 3441
3431 3442 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3432 3443
3433 3444 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3434 3445 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3435 3446
3436 3447 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3437 3448
3438 3449 ld_sync = (LD_TARGET_SYNC *)ci;
3439 3450
3440 3451 for (i = 0; i < num_lds; i++, ld_sync++) {
3441 3452 raid = MR_LdRaidGet(i, map);
3442 3453
3443 3454 con_log(CL_ANN1,
3444 3455 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3445 3456 i, raid->seqNum, raid->flags.ldSyncRequired));
3446 3457
3447 3458 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3448 3459
3449 3460 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3450 3461 i, ld_sync->ldTargetId));
3451 3462
3452 3463 ld_sync->seqNum = raid->seqNum;
3453 3464 }
3454 3465
3455 3466
3456 3467 size_map_info = sizeof (MR_FW_RAID_MAP) +
3457 3468 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3458 3469
3459 3470 dcmd->cmd = MFI_CMD_OP_DCMD;
3460 3471 dcmd->cmd_status = 0xFF;
3461 3472 dcmd->sge_count = 1;
3462 3473 dcmd->flags = MFI_FRAME_DIR_WRITE;
3463 3474 dcmd->timeout = 0;
3464 3475 dcmd->pad_0 = 0;
3465 3476 dcmd->data_xfer_len = size_map_info;
3466 3477 ASSERT(num_lds <= 255);
3467 3478 dcmd->mbox.b[0] = (U8)num_lds;
3468 3479 dcmd->mbox.b[1] = 1; /* Pend */
3469 3480 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3470 3481 dcmd->sgl.sge32[0].phys_addr = ci_h;
3471 3482 dcmd->sgl.sge32[0].length = size_map_info;
3472 3483
3473 3484
3474 3485 instance->map_update_cmd = cmd;
3475 3486 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3476 3487
3477 3488 instance->func_ptr->issue_cmd(cmd, instance);
3478 3489
3479 3490 instance->unroll.syncCmd = 1;
3480 3491 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3481 3492
3482 3493 return (ret);
3483 3494 }
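
mrsas_tbolt_sync_map_info() relies on the driver's two ld_map buffers alternating roles: (map_id & 1) selects the buffer holding the current map, while ((map_id - 1) & 1) selects the other buffer, which is reused here to stage the LD_TARGET_SYNC payload. A small standalone sketch of that index arithmetic (illustrative only):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	uint32_t map_id;

	for (map_id = 0; map_id < 4; map_id++) {
		uint32_t current = map_id & 1;
		uint32_t spare = (map_id - 1) & 1;

		/* The two indices always differ, so the buffers never collide. */
		assert(current != spare);
		(void) printf("map_id=%u current=%u spare=%u\n",
		    map_id, current, spare);
	}
	return (0);
}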
3484 3495
3485 3496 /*
3486 3497 * abort_syncmap_cmd
3487 3498 */
3488 3499 int
3489 3500 abort_syncmap_cmd(struct mrsas_instance *instance,
3490 3501 struct mrsas_cmd *cmd_to_abort)
3491 3502 {
3492 3503 int ret = 0;
3493 3504
3494 3505 struct mrsas_cmd *cmd;
3495 3506 struct mrsas_abort_frame *abort_fr;
3496 3507
3497 3508 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3498 3509
3499 3510 cmd = get_raid_msg_mfi_pkt(instance);
3500 3511
3501 3512 if (!cmd) {
3502 3513 cmn_err(CE_WARN,
3503 3514 		    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3504 3515 return (DDI_FAILURE);
3505 3516 }
3506 3517 /* Clear the frame buffer and assign back the context id */
3507 3518 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3508 3519 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3509 3520 cmd->index);
3510 3521
3511 3522 abort_fr = &cmd->frame->abort;
3512 3523
3513 3524 /* prepare and issue the abort frame */
3514 3525 ddi_put8(cmd->frame_dma_obj.acc_handle,
3515 3526 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3516 3527 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3517 3528 MFI_CMD_STATUS_SYNC_MODE);
3518 3529 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3519 3530 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3520 3531 cmd_to_abort->index);
3521 3532 ddi_put32(cmd->frame_dma_obj.acc_handle,
3522 3533 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3523 3534 ddi_put32(cmd->frame_dma_obj.acc_handle,
3524 3535 &abort_fr->abort_mfi_phys_addr_hi, 0);
3525 3536
3526 3537 cmd->frame_count = 1;
3527 3538
3528 3539 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3529 3540
3530 3541 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3531 3542 con_log(CL_ANN1, (CE_WARN,
3532 3543 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3533 3544 ret = -1;
3534 3545 } else {
3535 3546 ret = 0;
3536 3547 }
3537 3548
3538 3549 return_raid_msg_mfi_pkt(instance, cmd);
3539 3550
3540 3551 atomic_add_16(&instance->fw_outstanding, (-1));
3541 3552
3542 3553 return (ret);
3543 3554 }
3544 3555
3545 3556
3546 3557 #ifdef PDSUPPORT
3547 3558 /*
3548 3559 * Even though these functions were originally intended for 2208 only, it
3549 3560 * turns out they're useful for "Skinny" support as well. In a perfect world,
3550 3561 * these two functions would be either in mr_sas.c, or in their own new source
3551 3562 * file. Since this driver needs some cleanup anyway, keep this portion in
3552 3563 * mind as well.
3553 3564 */
3554 3565
3555 3566 int
3556 3567 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3557 3568 uint8_t lun, dev_info_t **ldip)
3558 3569 {
3559 3570 struct scsi_device *sd;
3560 3571 dev_info_t *child;
3561 3572 int rval, dtype;
3562 3573 struct mrsas_tbolt_pd_info *pds = NULL;
3563 3574
3564 3575 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3565 3576 tgt, lun));
3566 3577
3567 3578 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3568 3579 if (ldip) {
3569 3580 *ldip = child;
3570 3581 }
3571 3582 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3572 3583 rval = mrsas_service_evt(instance, tgt, 1,
3573 3584 MRSAS_EVT_UNCONFIG_TGT, NULL);
3574 3585 con_log(CL_ANN1, (CE_WARN,
3575 3586 "mr_sas:DELETING STALE ENTRY rval = %d "
3576 3587 "tgt id = %d", rval, tgt));
3577 3588 return (NDI_FAILURE);
3578 3589 }
3579 3590 return (NDI_SUCCESS);
3580 3591 }
3581 3592
3582 3593 pds = (struct mrsas_tbolt_pd_info *)
3583 3594 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3584 3595 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3585 3596 dtype = pds->scsiDevType;
3586 3597
3587 3598 /* Check for Disk */
3588 3599 if ((dtype == DTYPE_DIRECT)) {
3589 3600 if ((dtype == DTYPE_DIRECT) &&
3590 3601 (LE_16(pds->fwState) != PD_SYSTEM)) {
3591 3602 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3592 3603 return (NDI_FAILURE);
3593 3604 }
3594 3605 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3595 3606 sd->sd_address.a_hba_tran = instance->tran;
3596 3607 sd->sd_address.a_target = (uint16_t)tgt;
3597 3608 sd->sd_address.a_lun = (uint8_t)lun;
3598 3609
3599 3610 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3600 3611 rval = mrsas_config_scsi_device(instance, sd, ldip);
3601 3612 con_log(CL_DLEVEL1, (CE_NOTE,
3602 3613 "Phys. device found: tgt %d dtype %d: %s",
3603 3614 tgt, dtype, sd->sd_inq->inq_vid));
3604 3615 } else {
3605 3616 rval = NDI_FAILURE;
3606 3617 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3607 3618 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3608 3619 tgt, dtype, sd->sd_inq->inq_vid));
3609 3620 }
3610 3621
3611 3622 /* sd_unprobe is blank now. Free buffer manually */
3612 3623 if (sd->sd_inq) {
3613 3624 kmem_free(sd->sd_inq, SUN_INQSIZE);
3614 3625 sd->sd_inq = (struct scsi_inquiry *)NULL;
3615 3626 }
3616 3627 kmem_free(sd, sizeof (struct scsi_device));
3617 3628 } else {
3618 3629 con_log(CL_ANN1, (CE_NOTE,
3619 3630 "Device not supported: tgt %d lun %d dtype %d",
3620 3631 tgt, lun, dtype));
3621 3632 rval = NDI_FAILURE;
3622 3633 }
3623 3634
3624 3635 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3625 3636 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3626 3637 rval));
3627 3638 return (rval);
3628 3639 }
3629 3640
3630 3641 static void
3631 3642 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3632 3643 struct mrsas_tbolt_pd_info *pds, int tgt)
3633 3644 {
3634 3645 struct mrsas_cmd *cmd;
3635 3646 struct mrsas_dcmd_frame *dcmd;
3636 3647 dma_obj_t dcmd_dma_obj;
3637 3648
3638 3649 ASSERT(instance->tbolt || instance->skinny);
3639 3650
3640 3651 if (instance->tbolt)
3641 3652 cmd = get_raid_msg_pkt(instance);
3642 3653 else
3643 3654 cmd = mrsas_get_mfi_pkt(instance);
3644 3655
3645 3656 if (!cmd) {
3646 3657 con_log(CL_ANN1,
3647 3658 (CE_WARN, "Failed to get a cmd for get pd info"));
3648 3659 return;
3649 3660 }
3650 3661
3651 3662 /* Clear the frame buffer and assign back the context id */
3652 3663 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3653 3664 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3654 3665 cmd->index);
3655 3666
3656 3667
3657 3668 dcmd = &cmd->frame->dcmd;
3658 3669 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3659 3670 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3660 3671 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3661 3672 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3662 3673 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3663 3674 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3664 3675
3665 3676 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3666 3677 DDI_STRUCTURE_LE_ACC);
3667 3678 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3668 3679 bzero(dcmd->mbox.b, 12);
3669 3680 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3670 3681 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3671 3682 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3672 3683 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3673 3684 MFI_FRAME_DIR_READ);
3674 3685 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3675 3686 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3676 3687 sizeof (struct mrsas_tbolt_pd_info));
3677 3688 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3678 3689 MR_DCMD_PD_GET_INFO);
3679 3690 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3680 3691 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3681 3692 sizeof (struct mrsas_tbolt_pd_info));
3682 3693 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3683 3694 dcmd_dma_obj.dma_cookie[0].dmac_address);
3684 3695
3685 3696 cmd->sync_cmd = MRSAS_TRUE;
3686 3697 cmd->frame_count = 1;
3687 3698
3688 3699 if (instance->tbolt)
3689 3700 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3690 3701
3691 3702 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3692 3703
3693 3704 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3694 3705 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3695 3706 DDI_DEV_AUTOINCR);
3696 3707 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3697 3708
3698 3709 if (instance->tbolt)
3699 3710 return_raid_msg_pkt(instance, cmd);
3700 3711 else
3701 3712 mrsas_return_mfi_pkt(instance, cmd);
3702 3713 }
3703 3714 #endif
1435 lines elided