1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
35
36 #include "bcm_osal.h"
37 #include "ecore.h"
38 #include "ecore_status.h"
39 #include "nvm_map.h"
40 #include "nvm_cfg.h"
41 #include "ecore_mcp.h"
42 #include "mcp_public.h"
43 #include "reg_addr.h"
44 #include "ecore_hw.h"
45 #include "ecore_init_fw_funcs.h"
46 #include "ecore_sriov.h"
47 #include "ecore_vf.h"
48 #include "ecore_iov_api.h"
49 #include "ecore_gtt_reg_addr.h"
50 #include "ecore_iro.h"
51 #include "ecore_dcbx.h"
52 #include "ecore_sp_commands.h"
53
54 #define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
56
57 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
58 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
59
60 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
61 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
62 _val)
63
64 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
65 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
66
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
OFFSETOF(struct public_drv_mb, _field), _val)
70
71 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
72 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
73 OFFSETOF(struct public_drv_mb, _field))
74
75 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
76 DRV_ID_PDA_COMP_VER_SHIFT)
77
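/* 2^17 bytes = 2^20 bits; the shift converts a rate in Mbit into bytes */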
78 #define MCP_BYTES_PER_MBIT_SHIFT 17
79
80 #ifndef ASIC_ONLY
81 static int loaded;
82 static int loaded_port[MAX_NUM_PORTS] = { 0 };
83 #endif
84
85 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
86 {
87 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
88 return false;
89 return true;
90 }
91
92 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
93 struct ecore_ptt *p_ptt)
94 {
95 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
96 PUBLIC_PORT);
97 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
98
99 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
100 MFW_PORT(p_hwfn));
101 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
102 "port_addr = 0x%x, port_id 0x%02x\n",
103 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
104 }
105
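/* Copy the MFW mailbox from shmem into the local mfw_mb_cur buffer. The
 * first dword of the mailbox holds its length, so the reads start at
 * mfw_mb_addr + sizeof(u32).
 */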
106 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
107 struct ecore_ptt *p_ptt)
108 {
109 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
110 OSAL_BE32 tmp;
111 u32 i;
112
113 #ifndef ASIC_ONLY
114 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
115 return;
116 #endif
117
118 if (!p_hwfn->mcp_info->public_base)
119 return;
120
121 for (i = 0; i < length; i++) {
122 tmp = ecore_rd(p_hwfn, p_ptt,
123 p_hwfn->mcp_info->mfw_mb_addr +
124 (i << 2) + sizeof(u32));
125
126 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
127 OSAL_BE32_TO_CPU(tmp);
128 }
129 }
130
131 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
132 {
133 if (p_hwfn->mcp_info) {
134 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
135 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
136 #ifdef CONFIG_ECORE_LOCK_ALLOC
137 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
138 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
139 #endif
140 }
141 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
142 p_hwfn->mcp_info = OSAL_NULL;
143
144 return ECORE_SUCCESS;
145 }
146
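/* Read the MCP public base from the shared-memory register and derive the
 * per-PF driver and MFW mailbox addresses, the current mailbox and pulse
 * sequence numbers, and the MCP reset (POR) history counter.
 */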
147 enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
148 struct ecore_ptt *p_ptt)
149 {
150 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
151 u32 drv_mb_offsize, mfw_mb_offsize;
152 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
153
154 #ifndef ASIC_ONLY
155 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
156 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
157 p_info->public_base = 0;
158 return ECORE_INVAL;
159 }
160 #endif
161
162 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
163 if (!p_info->public_base)
164 return ECORE_INVAL;
165
166 p_info->public_base |= GRCBASE_MCP;
167
168 /* Calculate the driver and MFW mailbox address */
169 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
170 SECTION_OFFSIZE_ADDR(p_info->public_base,
171 PUBLIC_DRV_MB));
172 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
173 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
174 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
175 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
176
177 /* Set the MFW MB address */
178 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
179 SECTION_OFFSIZE_ADDR(p_info->public_base,
180 PUBLIC_MFW_MB));
181 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
182 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
183 p_info->mfw_mb_addr);
184
185 /* Get the current driver mailbox sequence before sending
186 * the first command
187 */
188 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
189 DRV_MSG_SEQ_NUMBER_MASK;
190
191 /* Get current FW pulse sequence */
192 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
193 DRV_PULSE_SEQ_MASK;
194
195 p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
196 MISCS_REG_GENERIC_POR_0);
197
198 return ECORE_SUCCESS;
199 }
200
201 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
202 struct ecore_ptt *p_ptt)
203 {
204 struct ecore_mcp_info *p_info;
205 u32 size;
206
207 /* Allocate mcp_info structure */
208 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
209 sizeof(*p_hwfn->mcp_info));
210 if (!p_hwfn->mcp_info)
211 goto err;
212 p_info = p_hwfn->mcp_info;
213
214 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
215 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicates that
 * the MCP is not initialized
 */
219 return ECORE_SUCCESS;
220 }
221
222 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
223 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
224 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
226 goto err;
227
228 /* Initialize the MFW spinlock */
229 #ifdef CONFIG_ECORE_LOCK_ALLOC
230 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
231 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
232 #endif
233 OSAL_SPIN_LOCK_INIT(&p_info->lock);
234 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
235
236 return ECORE_SUCCESS;
237
238 err:
239 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
240 ecore_mcp_free(p_hwfn);
241 return ECORE_NOMEM;
}
244
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which causes other
 * competing contexts to fail sending their mailbox commands.
 */
252 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
253 u32 cmd)
254 {
255 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
256
/* The spinlock shouldn't be acquired while the mailbox command is
 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
 * pending [UN]LOAD_REQ command of another PF together with a spinlock
 * (i.e. interrupts are disabled) can lead to a deadlock.
 * It is assumed that for a single PF, no other mailbox commands can be
 * sent from another context while sending LOAD_REQ, and that any
 * commands sent in parallel to UNLOAD_REQ can be cancelled.
 */
265 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
266 p_hwfn->mcp_info->block_mb_sending = false;
267
/* There's at least a single command that is sent by ecore during the
 * load sequence which the MFW expects [DRV_MSG_CODE_FEATURE_SUPPORT],
 * so it must be allowed through even while mailbox sending is blocked.
 */
271 if ((p_hwfn->mcp_info->block_mb_sending) &&
272 (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
273 DP_NOTICE(p_hwfn, false,
274 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
275 cmd);
276 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
277 return ECORE_BUSY;
278 }
279
280 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
281 p_hwfn->mcp_info->block_mb_sending = true;
282 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
283 }
284
285 return ECORE_SUCCESS;
286 }
287
288 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
289 {
290 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
291 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
292 }
293
294 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
295 struct ecore_ptt *p_ptt)
296 {
297 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
298 u32 delay = CHIP_MCP_RESP_ITER_US;
299 u32 org_mcp_reset_seq, cnt = 0;
300 enum _ecore_status_t rc = ECORE_SUCCESS;
301
302 #ifndef ASIC_ONLY
303 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
304 delay = EMUL_MCP_RESP_ITER_US;
305 #endif
306
/* Ensure that only a single thread is accessing the mailbox at any
 * given time.
 */
310 rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
311 if (rc != ECORE_SUCCESS)
312 return rc;
313
314 /* Set drv command along with the updated sequence */
315 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
316 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
317
318 do {
319 /* Wait for MFW response */
320 OSAL_UDELAY(delay);
/* Give the MFW up to 500 msec (50 * 1000 iterations of 10 usec) */
322 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
323 MISCS_REG_GENERIC_POR_0)) &&
324 (cnt++ < ECORE_MCP_RESET_RETRIES));
325
326 if (org_mcp_reset_seq !=
327 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
328 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
329 "MCP was reset after %d usec\n", cnt * delay);
330 } else {
331 DP_ERR(p_hwfn, "Failed to reset MCP\n");
332 rc = ECORE_AGAIN;
333 }
334
335 ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
336
337 return rc;
338 }
339
340 void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
341 struct ecore_ptt *p_ptt)
342 {
343 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
344
345 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
346 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
347 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
348 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
349 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
350 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
351 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
352
353 DP_NOTICE(p_hwfn, false,
354 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
355 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
356 }
357
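/* Issue a single mailbox command: write the param and the command dword
 * (command | new sequence number) to the driver mailbox, then poll the FW
 * mailbox header until the MFW echoes the sequence number back or the
 * retry budget is exhausted.
 */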
358 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
359 struct ecore_ptt *p_ptt,
360 u32 cmd, u32 param,
361 u32 *o_mcp_resp, u32 *o_mcp_param)
362 {
363 u32 delay = CHIP_MCP_RESP_ITER_US;
364 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
365 u32 seq, cnt = 1, actual_mb_seq;
366 enum _ecore_status_t rc = ECORE_SUCCESS;
367
368 #ifndef ASIC_ONLY
369 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
370 delay = EMUL_MCP_RESP_ITER_US;
371 /* There is a built-in delay of 100usec in each MFW response read */
372 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
373 max_retries /= 10;
374 #endif
375
376 /* Get actual driver mailbox sequence */
377 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
378 DRV_MSG_SEQ_NUMBER_MASK;
379
380 /* Use MCP history register to check if MCP reset occurred between
381 * init time and now.
382 */
383 if (p_hwfn->mcp_info->mcp_hist !=
384 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
385 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
386 ecore_load_mcp_offsets(p_hwfn, p_ptt);
387 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
388 }
389 seq = ++p_hwfn->mcp_info->drv_mb_seq;
390
391 /* Set drv param */
392 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
393
394 /* Set drv command along with the updated sequence */
395 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
396
397 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
398 "wrote command (%x) to MFW MB param 0x%08x\n",
399 (cmd | seq), param);
400
401 do {
402 /* Wait for MFW response */
403 OSAL_UDELAY(delay);
404 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
405
/* Give the MFW up to 5 sec (500 * 1000 iterations of 10 usec) */
407 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
408 (cnt++ < max_retries));
409
410 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
411 "[after %d ms] read (%x) seq is (%x) from FW MB\n",
412 cnt * delay, *o_mcp_resp, seq);
413
414 /* Is this a reply to our command? */
415 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
416 *o_mcp_resp &= FW_MSG_CODE_MASK;
417 /* Get the MCP param */
418 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
419 } else {
420 /* FW BUG! */
421 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
422 cmd, param);
423 ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
424 *o_mcp_resp = 0;
425 rc = ECORE_AGAIN;
426 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
427 }
428 return rc;
429 }
430
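/* Like ecore_do_mcp_cmd(), but additionally copies an optional data buffer
 * into the union_data area of the driver mailbox before sending the
 * command, and reads the response data back from it afterwards.
 */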
431 static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
432 struct ecore_ptt *p_ptt,
433 struct ecore_mcp_mb_params *p_mb_params)
434 {
435 union drv_union_data union_data;
436 u32 union_data_addr;
437 enum _ecore_status_t rc;
438
439 /* MCP not initialized */
440 if (!ecore_mcp_is_init(p_hwfn)) {
441 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
442 return ECORE_BUSY;
443 }
444
445 if (p_mb_params->data_src_size > sizeof(union_data) ||
446 p_mb_params->data_dst_size > sizeof(union_data)) {
447 DP_ERR(p_hwfn,
448 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
449 p_mb_params->data_src_size, p_mb_params->data_dst_size,
450 sizeof(union_data));
451 return ECORE_INVAL;
452 }
453
454 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
455 OFFSETOF(struct public_drv_mb, union_data);
456
/* Ensure that only a single thread is accessing the mailbox at any
 * given time.
 */
460 rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
461 if (rc != ECORE_SUCCESS)
462 return rc;
463
464 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
465 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
466 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
467 p_mb_params->data_src_size);
468 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
469 sizeof(union_data));
470
471 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
472 p_mb_params->param, &p_mb_params->mcp_resp,
473 &p_mb_params->mcp_param);
474
475 if (p_mb_params->p_data_dst != OSAL_NULL &&
476 p_mb_params->data_dst_size)
477 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
478 union_data_addr, p_mb_params->data_dst_size);
479
480 ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
481
482 return rc;
483 }
484
485 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
486 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
487 u32 *o_mcp_resp, u32 *o_mcp_param)
488 {
489 struct ecore_mcp_mb_params mb_params;
490 enum _ecore_status_t rc;
491
492 #ifndef ASIC_ONLY
493 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
494 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
495 loaded--;
496 loaded_port[p_hwfn->port_id]--;
497 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
498 loaded);
499 }
500 return ECORE_SUCCESS;
501 }
502 #endif
503
504 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
505 mb_params.cmd = cmd;
506 mb_params.param = param;
507 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
508 if (rc != ECORE_SUCCESS)
509 return rc;
510
511 *o_mcp_resp = mb_params.mcp_resp;
512 *o_mcp_param = mb_params.mcp_param;
513
514 return ECORE_SUCCESS;
515 }
516
517 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
518 struct ecore_ptt *p_ptt,
519 u32 cmd,
520 u32 param,
521 u32 *o_mcp_resp,
522 u32 *o_mcp_param,
523 u32 i_txn_size,
524 u32 *i_buf)
525 {
526 struct ecore_mcp_mb_params mb_params;
527 enum _ecore_status_t rc;
528
529 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
530 mb_params.cmd = cmd;
531 mb_params.param = param;
532 mb_params.p_data_src = i_buf;
533 mb_params.data_src_size = (u8) i_txn_size;
534 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
535 if (rc != ECORE_SUCCESS)
536 return rc;
537
538 *o_mcp_resp = mb_params.mcp_resp;
539 *o_mcp_param = mb_params.mcp_param;
540
541 return ECORE_SUCCESS;
542 }
543
544 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
545 struct ecore_ptt *p_ptt,
546 u32 cmd,
547 u32 param,
548 u32 *o_mcp_resp,
549 u32 *o_mcp_param,
550 u32 *o_txn_size,
551 u32 *o_buf)
552 {
553 struct ecore_mcp_mb_params mb_params;
554 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
555 enum _ecore_status_t rc;
556
557 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
558 mb_params.cmd = cmd;
559 mb_params.param = param;
560 mb_params.p_data_dst = raw_data;
561
562 /* Use the maximal value since the actual one is part of the response */
563 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
564
565 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
566 if (rc != ECORE_SUCCESS)
567 return rc;
568
569 *o_mcp_resp = mb_params.mcp_resp;
570 *o_mcp_param = mb_params.mcp_param;
571
572 *o_txn_size = *o_mcp_param;
573 OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);
574
575 return ECORE_SUCCESS;
576 }
577
578 #ifndef ASIC_ONLY
579 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
580 u32 *p_load_code)
581 {
582 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
583
584 if (!loaded) {
585 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
586 } else if (!loaded_port[p_hwfn->port_id]) {
587 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
588 } else {
589 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
590 }
591
/* On CMT, always report an engine load */
593 if (p_hwfn->p_dev->num_hwfns > 1)
594 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
595
596 *p_load_code = load_phase;
597 loaded++;
598 loaded_port[p_hwfn->port_id]++;
599
600 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
601 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
602 *p_load_code, loaded, p_hwfn->port_id,
603 loaded_port[p_hwfn->port_id]);
604 }
605 #endif
606
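/* Decide whether an existing driver may be forcibly unloaded. Unless
 * overridden, a force load is allowed only when an OS driver replaces a
 * preboot driver, or when a kdump driver replaces an OS driver.
 */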
607 static bool
608 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
609 enum ecore_override_force_load override_force_load)
610 {
611 bool can_force_load = false;
612
613 switch (override_force_load) {
614 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
615 can_force_load = true;
616 break;
617 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
618 can_force_load = false;
619 break;
620 default:
621 can_force_load = (drv_role == DRV_ROLE_OS &&
622 exist_drv_role == DRV_ROLE_PREBOOT) ||
623 (drv_role == DRV_ROLE_KDUMP &&
624 exist_drv_role == DRV_ROLE_OS);
625 break;
626 }
627
628 return can_force_load;
629 }
630
631 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
632 struct ecore_ptt *p_ptt)
633 {
634 u32 resp = 0, param = 0;
635 enum _ecore_status_t rc;
636
637 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
&resp, &param);
639 if (rc != ECORE_SUCCESS)
640 DP_NOTICE(p_hwfn, false,
641 "Failed to send cancel load request, rc = %d\n", rc);
642
643 return rc;
644 }
645
646 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
647 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
648 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
649 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
650 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
651 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
652 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
653
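/* Build a bitmap of the protocol personalities compiled into this ecore
 * instance; it is sent to the MFW as drv_ver_1 in the load request.
 */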
654 static u32 ecore_get_config_bitmap(void)
655 {
656 u32 config_bitmap = 0x0;
657
658 #ifdef CONFIG_ECORE_L2
659 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
660 #endif
661 #ifdef CONFIG_ECORE_SRIOV
662 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
663 #endif
664 #ifdef CONFIG_ECORE_ROCE
665 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
666 #endif
667 #ifdef CONFIG_ECORE_IWARP
668 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
669 #endif
670 #ifdef CONFIG_ECORE_FCOE
671 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
672 #endif
673 #ifdef CONFIG_ECORE_ISCSI
674 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
675 #endif
676 #ifdef CONFIG_ECORE_LL2
677 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
678 #endif
679
680 return config_bitmap;
681 }
682
683 struct ecore_load_req_in_params {
684 u8 hsi_ver;
685 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
686 #define ECORE_LOAD_REQ_HSI_VER_1 1
687 u32 drv_ver_0;
688 u32 drv_ver_1;
689 u32 fw_ver;
690 u8 drv_role;
691 u8 timeout_val;
692 u8 force_cmd;
693 bool avoid_eng_reset;
694 };
695
696 struct ecore_load_req_out_params {
697 u32 load_code;
698 u32 exist_drv_ver_0;
699 u32 exist_drv_ver_1;
700 u32 exist_fw_ver;
701 u8 exist_drv_role;
702 u8 mfw_hsi_ver;
703 bool drv_exists;
704 };
705
706 static enum _ecore_status_t
707 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
708 struct ecore_load_req_in_params *p_in_params,
709 struct ecore_load_req_out_params *p_out_params)
710 {
711 struct ecore_mcp_mb_params mb_params;
712 struct load_req_stc load_req;
713 struct load_rsp_stc load_rsp;
714 u32 hsi_ver;
715 enum _ecore_status_t rc;
716
717 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
718 load_req.drv_ver_0 = p_in_params->drv_ver_0;
719 load_req.drv_ver_1 = p_in_params->drv_ver_1;
720 load_req.fw_ver = p_in_params->fw_ver;
721 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
722 p_in_params->drv_role);
723 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
724 p_in_params->timeout_val);
725 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
726 p_in_params->force_cmd);
727 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
728 p_in_params->avoid_eng_reset);
729
730 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
731 DRV_ID_MCP_HSI_VER_CURRENT :
732 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
733
734 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
735 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
736 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
737 mb_params.p_data_src = &load_req;
738 mb_params.data_src_size = sizeof(load_req);
739 mb_params.p_data_dst = &load_rsp;
740 mb_params.data_dst_size = sizeof(load_rsp);
741
742 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
743 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
744 mb_params.param,
745 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
746 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
747 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
748 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
749
750 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
751 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
752 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
753 load_req.drv_ver_0, load_req.drv_ver_1,
754 load_req.fw_ver, load_req.misc0,
755 ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
756 ECORE_MFW_GET_FIELD(load_req.misc0,
757 LOAD_REQ_LOCK_TO),
758 ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
759 ECORE_MFW_GET_FIELD(load_req.misc0,
760 LOAD_REQ_FLAGS0));
761
762 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
763 if (rc != ECORE_SUCCESS) {
764 DP_NOTICE(p_hwfn, false,
765 "Failed to send load request, rc = %d\n", rc);
766 return rc;
767 }
768
769 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
770 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
771 p_out_params->load_code = mb_params.mcp_resp;
772
773 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
774 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
775 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
776 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
777 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
778 load_rsp.fw_ver, load_rsp.misc0,
779 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
780 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
781 ECORE_MFW_GET_FIELD(load_rsp.misc0,
782 LOAD_RSP_FLAGS0));
783
784 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
785 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
786 p_out_params->exist_fw_ver = load_rsp.fw_ver;
787 p_out_params->exist_drv_role =
788 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
789 p_out_params->mfw_hsi_ver =
790 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
791 p_out_params->drv_exists =
792 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
793 LOAD_RSP_FLAGS0_DRV_EXISTS;
794 }
795
796 return ECORE_SUCCESS;
797 }
798
static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
enum ecore_drv_role drv_role,
u8 *p_mfw_drv_role)
{
switch (drv_role) {
805 case ECORE_DRV_ROLE_OS:
806 *p_mfw_drv_role = DRV_ROLE_OS;
807 break;
808 case ECORE_DRV_ROLE_KDUMP:
809 *p_mfw_drv_role = DRV_ROLE_KDUMP;
810 break;
811 default:
812 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
813 return ECORE_INVAL;
814 }
815
816 return ECORE_SUCCESS;
817 }
818
819 enum ecore_load_req_force {
820 ECORE_LOAD_REQ_FORCE_NONE,
821 ECORE_LOAD_REQ_FORCE_PF,
822 ECORE_LOAD_REQ_FORCE_ALL,
823 };
824
825 static enum _ecore_status_t
826 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
827 enum ecore_load_req_force force_cmd,
828 u8 *p_mfw_force_cmd)
829 {
830 switch (force_cmd) {
831 case ECORE_LOAD_REQ_FORCE_NONE:
832 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
833 break;
834 case ECORE_LOAD_REQ_FORCE_PF:
835 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
836 break;
837 case ECORE_LOAD_REQ_FORCE_ALL:
838 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
839 break;
840 default:
841 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
842 return ECORE_INVAL;
843 }
844
845 return ECORE_SUCCESS;
846 }
847
848 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
849 struct ecore_ptt *p_ptt,
850 struct ecore_load_req_params *p_params)
851 {
852 struct ecore_load_req_out_params out_params;
853 struct ecore_load_req_in_params in_params;
854 u8 mfw_drv_role, mfw_force_cmd;
855 enum _ecore_status_t rc;
856
857 #ifndef ASIC_ONLY
858 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
859 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
860 return ECORE_SUCCESS;
861 }
862 #endif
863
864 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
865 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
866 in_params.drv_ver_0 = ECORE_VERSION;
867 in_params.drv_ver_1 = ecore_get_config_bitmap();
868 in_params.fw_ver = STORM_FW_VERSION;
rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
870 if (rc != ECORE_SUCCESS)
871 return rc;
872
873 in_params.drv_role = mfw_drv_role;
874 in_params.timeout_val = p_params->timeout_val;
875 rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
876 &mfw_force_cmd);
877 if (rc != ECORE_SUCCESS)
878 return rc;
879
880 in_params.force_cmd = mfw_force_cmd;
881 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
882
883 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
884 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
885 if (rc != ECORE_SUCCESS)
886 return rc;
887
888 /* First handle cases where another load request should/might be sent:
889 * - MFW expects the old interface [HSI version = 1]
890 * - MFW responds that a force load request is required
891 */
892 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
893 DP_INFO(p_hwfn,
894 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
895
896 /* The previous load request set the mailbox blocking */
897 p_hwfn->mcp_info->block_mb_sending = false;
898
899 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
900 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
901 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
902 &out_params);
903 if (rc != ECORE_SUCCESS)
904 return rc;
905 } else if (out_params.load_code ==
906 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
907 /* The previous load request set the mailbox blocking */
908 p_hwfn->mcp_info->block_mb_sending = false;
909
910 if (ecore_mcp_can_force_load(in_params.drv_role,
911 out_params.exist_drv_role,
912 p_params->override_force_load)) {
913 DP_INFO(p_hwfn,
914 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
915 in_params.drv_role, in_params.fw_ver,
916 in_params.drv_ver_1, in_params.drv_ver_0,
917 out_params.exist_drv_role,
918 out_params.exist_fw_ver,
919 out_params.exist_drv_ver_1,
920 out_params.exist_drv_ver_0);
921 DP_INFO(p_hwfn, "Sending a force load request\n");
922
923 rc = ecore_get_mfw_force_cmd(p_hwfn,
924 ECORE_LOAD_REQ_FORCE_ALL,
925 &mfw_force_cmd);
926 if (rc != ECORE_SUCCESS)
927 return rc;
928
929 in_params.force_cmd = mfw_force_cmd;
930 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
931 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
932 &out_params);
933 if (rc != ECORE_SUCCESS)
934 return rc;
935 } else {
936 DP_NOTICE(p_hwfn, false,
937 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
938 in_params.drv_role, in_params.fw_ver,
939 in_params.drv_ver_0, in_params.drv_ver_1,
940 out_params.exist_drv_role,
941 out_params.exist_fw_ver,
942 out_params.exist_drv_ver_0,
943 out_params.exist_drv_ver_1);
944 DP_NOTICE(p_hwfn, false,
945 "Avoid sending a force load request to prevent disruption of active PFs\n");
946
947 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
948 return ECORE_BUSY;
949 }
950 }
951
952 /* Now handle the other types of responses.
953 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
954 * expected here after the additional revised load requests were sent.
955 */
956 switch (out_params.load_code) {
957 case FW_MSG_CODE_DRV_LOAD_ENGINE:
958 case FW_MSG_CODE_DRV_LOAD_PORT:
959 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
960 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
961 out_params.drv_exists) {
962 /* The role and fw/driver version match, but the PF is
963 * already loaded and has not been unloaded gracefully.
964 * This is unexpected since a quasi-FLR request was
965 * previously sent as part of ecore_hw_prepare().
966 */
967 DP_NOTICE(p_hwfn, false,
968 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
969 return ECORE_INVAL;
970 }
971 break;
972 case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
973 case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
974 case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
975 case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
976 DP_NOTICE(p_hwfn, false,
977 "MFW refused a load request [resp 0x%08x]. Aborting.\n",
978 out_params.load_code);
979 return ECORE_BUSY;
980 default:
981 DP_NOTICE(p_hwfn, false,
982 "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
983 out_params.load_code);
return ECORE_BUSY;
985 }
986
987 p_params->load_code = out_params.load_code;
988
989 return ECORE_SUCCESS;
990 }
991
992 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
993 struct ecore_ptt *p_ptt)
994 {
995 u32 wol_param, mcp_resp, mcp_param;
996
997 switch (p_hwfn->p_dev->wol_config) {
998 case ECORE_OV_WOL_DISABLED:
999 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1000 break;
1001 case ECORE_OV_WOL_ENABLED:
1002 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1003 break;
1004 default:
1005 DP_NOTICE(p_hwfn, true,
1006 "Unknown WoL configuration %02x\n",
1007 p_hwfn->p_dev->wol_config);
1008 /* Fallthrough */
1009 case ECORE_OV_WOL_DEFAULT:
1010 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1011 }
1012
1013 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1014 &mcp_resp, &mcp_param);
1015 }
1016
1017 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1018 struct ecore_ptt *p_ptt)
1019 {
1020 struct ecore_mcp_mb_params mb_params;
1021 struct mcp_mac wol_mac;
1022
1023 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1024 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1025
1026 /* Set the primary MAC if WoL is enabled */
1027 if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
1028 u8 *p_mac = p_hwfn->p_dev->wol_mac;
1029
1030 OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
1031 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1032 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1033 p_mac[4] << 8 | p_mac[5];
1034
1035 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
1036 "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
1037 p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
1038 p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);
1039
1040 mb_params.p_data_src = &wol_mac;
1041 mb_params.data_src_size = sizeof(wol_mac);
1042 }
1043
1044 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1045 }
1046
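/* Read the mcp_vf_disabled bitmap from the path section of shmem and mark
 * the corresponding VFs as FLR-ed.
 */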
1047 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1048 struct ecore_ptt *p_ptt)
1049 {
1050 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1051 PUBLIC_PATH);
1052 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1053 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1054 ECORE_PATH_ID(p_hwfn));
1055 u32 disabled_vfs[VF_MAX_STATIC / 32];
1056 int i;
1057
1058 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1059 "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1060 mfw_path_offsize, path_addr);
1061
1062 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1063 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1064 path_addr +
1065 OFFSETOF(struct public_path,
1066 mcp_vf_disabled) +
1067 sizeof(u32) * i);
1068 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1069 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1070 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1071 }
1072
1073 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1074 OSAL_VF_FLR_UPDATE(p_hwfn);
1075 }
1076
1077 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1078 struct ecore_ptt *p_ptt,
1079 u32 *vfs_to_ack)
1080 {
1081 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1082 PUBLIC_FUNC);
1083 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1084 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1085 MCP_PF_ID(p_hwfn));
1086 struct ecore_mcp_mb_params mb_params;
1087 enum _ecore_status_t rc;
1088 int i;
1089
1090 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1091 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1092 "Acking VFs [%08x,...,%08x] - %08x\n",
1093 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1094
1095 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1096 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1097 mb_params.p_data_src = vfs_to_ack;
1098 mb_params.data_src_size = VF_MAX_STATIC / 8;
1099 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1100 if (rc != ECORE_SUCCESS) {
1101 DP_NOTICE(p_hwfn, false,
1102 "Failed to pass ACK for VF flr to MFW\n");
1103 return ECORE_TIMEOUT;
1104 }
1105
1106 /* TMP - clear the ACK bits; should be done by MFW */
1107 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1108 ecore_wr(p_hwfn, p_ptt,
1109 func_addr +
1110 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1111 i * sizeof(u32), 0);
1112
1113 return rc;
1114 }
1115
1116 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1117 struct ecore_ptt *p_ptt)
1118 {
1119 u32 transceiver_state;
1120
1121 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1122 p_hwfn->mcp_info->port_addr +
1123 OFFSETOF(struct public_port,
1124 transceiver_data));
1125
1126 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1127 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1128 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1129 OFFSETOF(struct public_port,
1130 transceiver_data)));
1131
1132 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
1133
1134 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1135 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1136 else
1137 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1138 }
1139
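/* Parse the port's eee_status shmem field into the link state: whether EEE
 * is active, plus the locally and link-partner advertised capabilities.
 */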
1140 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1141 struct ecore_ptt *p_ptt,
1142 struct ecore_mcp_link_state *p_link)
1143 {
1144 u32 eee_status, val;
1145
1146 p_link->eee_adv_caps = 0;
1147 p_link->eee_lp_adv_caps = 0;
1148 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1149 OFFSETOF(struct public_port, eee_status));
1150 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1151 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
1152 if (val & EEE_1G_ADV)
1153 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1154 if (val & EEE_10G_ADV)
1155 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1156 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
1157 if (val & EEE_1G_ADV)
1158 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1159 if (val & EEE_10G_ADV)
1160 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1161 }
1162
1163 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1164 struct ecore_ptt *p_ptt,
1165 bool b_reset)
1166 {
1167 struct ecore_mcp_link_state *p_link;
1168 u8 max_bw, min_bw;
1169 u32 status = 0;
1170
1171 /* Prevent SW/attentions from doing this at the same time */
1172 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1173
1174 p_link = &p_hwfn->mcp_info->link_output;
1175 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1176 if (!b_reset) {
1177 status = ecore_rd(p_hwfn, p_ptt,
1178 p_hwfn->mcp_info->port_addr +
1179 OFFSETOF(struct public_port, link_status));
1180 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1181 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1182 status, (u32)(p_hwfn->mcp_info->port_addr +
1183 OFFSETOF(struct public_port, link_status)));
1184 } else {
1185 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1186 "Resetting link indications\n");
1187 goto out;
1188 }
1189
1190 if (p_hwfn->b_drv_link_init)
1191 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1192 else
1193 p_link->link_up = false;
1194
1195 p_link->full_duplex = true;
1196 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1197 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1198 p_link->speed = 100000;
1199 break;
1200 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1201 p_link->speed = 50000;
1202 break;
1203 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1204 p_link->speed = 40000;
1205 break;
1206 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1207 p_link->speed = 25000;
1208 break;
1209 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1210 p_link->speed = 20000;
1211 break;
1212 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1213 p_link->speed = 10000;
1214 break;
1215 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1216 p_link->full_duplex = false;
1217 /* Fall-through */
1218 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1219 p_link->speed = 1000;
1220 break;
1221 default:
1222 p_link->speed = 0;
1223 }
1224
/* We never store the total line speed, since p_link->speed is
 * changed again later according to bandwidth allocation.
 */
1228 if (p_link->link_up && p_link->speed)
1229 p_link->line_speed = p_link->speed;
1230 else
1231 p_link->line_speed = 0;
1232
1233 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1234 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1235
1236 /* Max bandwidth configuration */
1237 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1238
/* Min bandwidth configuration */
1240 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1241 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1242 p_link->min_pf_rate);
1243
1244 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1245 p_link->an_complete = !!(status &
1246 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1247 p_link->parallel_detection = !!(status &
1248 LINK_STATUS_PARALLEL_DETECTION_USED);
1249 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1250
1251 p_link->partner_adv_speed |=
1252 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1253 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1254 p_link->partner_adv_speed |=
1255 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1256 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1257 p_link->partner_adv_speed |=
1258 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1259 ECORE_LINK_PARTNER_SPEED_10G : 0;
1260 p_link->partner_adv_speed |=
1261 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1262 ECORE_LINK_PARTNER_SPEED_20G : 0;
1263 p_link->partner_adv_speed |=
1264 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1265 ECORE_LINK_PARTNER_SPEED_25G : 0;
1266 p_link->partner_adv_speed |=
1267 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1268 ECORE_LINK_PARTNER_SPEED_40G : 0;
1269 p_link->partner_adv_speed |=
1270 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1271 ECORE_LINK_PARTNER_SPEED_50G : 0;
1272 p_link->partner_adv_speed |=
1273 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1274 ECORE_LINK_PARTNER_SPEED_100G : 0;
1275
1276 p_link->partner_tx_flow_ctrl_en =
1277 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1278 p_link->partner_rx_flow_ctrl_en =
1279 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1280
1281 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1282 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1283 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1284 break;
1285 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1286 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1287 break;
1288 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1289 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1290 break;
1291 default:
1292 p_link->partner_adv_pause = 0;
1293 }
1294
1295 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1296
1297 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1298 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1299
1300 OSAL_LINK_UPDATE(p_hwfn);
1301 out:
1302 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1303 }
1304
1305 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1306 struct ecore_ptt *p_ptt,
1307 bool b_up)
1308 {
1309 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1310 struct ecore_mcp_mb_params mb_params;
1311 struct eth_phy_cfg phy_cfg;
1312 enum _ecore_status_t rc = ECORE_SUCCESS;
1313 u32 cmd;
1314
1315 #ifndef ASIC_ONLY
1316 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1317 return ECORE_SUCCESS;
1318 #endif
1319
1320 /* Set the shmem configuration according to params */
1321 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1322 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1323 if (!params->speed.autoneg)
1324 phy_cfg.speed = params->speed.forced_speed;
1325 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1326 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1327 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1328 phy_cfg.adv_speed = params->speed.advertised_speeds;
1329 phy_cfg.loopback_mode = params->loopback_mode;
1330 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
1331 if (params->eee.enable)
1332 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1333 if (params->eee.tx_lpi_enable)
1334 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1335 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1336 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1337 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1338 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1339 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1340 EEE_TX_TIMER_USEC_SHIFT) &
1341 EEE_TX_TIMER_USEC_MASK;
1342 }
1343
1344 p_hwfn->b_drv_link_init = b_up;
1345
1346 if (b_up)
1347 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1348 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1349 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1350 phy_cfg.loopback_mode);
1351 else
1352 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1353
1354 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1355 mb_params.cmd = cmd;
1356 mb_params.p_data_src = &phy_cfg;
1357 mb_params.data_src_size = sizeof(phy_cfg);
1358 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1359
1360 /* if mcp fails to respond we must abort */
1361 if (rc != ECORE_SUCCESS) {
1362 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1363 return rc;
1364 }
1365
1366 /* Mimic link-change attention, done for several reasons:
1367 * - On reset, there's no guarantee MFW would trigger
1368 * an attention.
1369 * - On initialization, older MFWs might not indicate link change
1370 * during LFA, so we'll never get an UP indication.
1371 */
1372 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1373
1374 return rc;
1375 }
1376
1377 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1378 struct ecore_ptt *p_ptt)
1379 {
1380 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1381
1382 /* TODO - Add support for VFs */
1383 if (IS_VF(p_hwfn->p_dev))
1384 return ECORE_INVAL;
1385
1386 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1387 PUBLIC_PATH);
1388 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1389 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1390
1391 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1392 path_addr +
1393 OFFSETOF(struct public_path, process_kill)) &
1394 PROCESS_KILL_COUNTER_MASK;
1395
1396 return proc_kill_cnt;
1397 }
1398
1399 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1400 struct ecore_ptt *p_ptt)
1401 {
1402 struct ecore_dev *p_dev = p_hwfn->p_dev;
1403 u32 proc_kill_cnt;
1404
/* Prevent possible attentions/interrupts during the recovery handling
 * until its load phase, during which they will be re-enabled.
 */
1408 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1409
1410 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1411
1412 /* The following operations should be done once, and thus in CMT mode
1413 * are carried out by only the first HW function.
1414 */
1415 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1416 return;
1417
1418 if (p_dev->recov_in_prog) {
1419 DP_NOTICE(p_hwfn, false,
1420 "Ignoring the indication since a recovery process is already in progress\n");
1421 return;
1422 }
1423
1424 p_dev->recov_in_prog = true;
1425
1426 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1427 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1428
1429 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1430 }
1431
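/* Handle an MFW request for protocol statistics: gather the relevant stats
 * from the upper layer and send them back via the GET_STATS mailbox command.
 */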
1432 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1433 struct ecore_ptt *p_ptt,
1434 enum MFW_DRV_MSG_TYPE type)
1435 {
1436 enum ecore_mcp_protocol_type stats_type;
1437 union ecore_mcp_protocol_stats stats;
1438 struct ecore_mcp_mb_params mb_params;
1439 u32 hsi_param;
1440 enum _ecore_status_t rc;
1441
1442 switch (type) {
1443 case MFW_DRV_MSG_GET_LAN_STATS:
1444 stats_type = ECORE_MCP_LAN_STATS;
1445 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1446 break;
1447 case MFW_DRV_MSG_GET_FCOE_STATS:
1448 stats_type = ECORE_MCP_FCOE_STATS;
1449 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1450 break;
1451 case MFW_DRV_MSG_GET_ISCSI_STATS:
1452 stats_type = ECORE_MCP_ISCSI_STATS;
1453 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1454 break;
1455 case MFW_DRV_MSG_GET_RDMA_STATS:
1456 stats_type = ECORE_MCP_RDMA_STATS;
1457 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1458 break;
1459 default:
1460 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
1461 return;
1462 }
1463
1464 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1465
1466 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1467 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1468 mb_params.param = hsi_param;
1469 mb_params.p_data_src = &stats;
1470 mb_params.data_src_size = sizeof(stats);
1471 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1472 if (rc != ECORE_SUCCESS)
1473 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1474 }
1475
1476 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1477 struct public_func *p_shmem_info)
1478 {
1479 struct ecore_mcp_function_info *p_info;
1480
1481 p_info = &p_hwfn->mcp_info->func_info;
1482
/* TODO - bandwidth min/max should have valid values of 1-100,
 * as well as some indication that the feature is disabled.
 * Until MFW/qlediag enforce those limitations, assume there is
 * always a limit, and clamp the value to min 1 / max 100 if it
 * isn't in range.
 */
1489 p_info->bandwidth_min = (p_shmem_info->config &
1490 FUNC_MF_CFG_MIN_BW_MASK) >>
1491 FUNC_MF_CFG_MIN_BW_SHIFT;
1492 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1493 DP_INFO(p_hwfn,
1494 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1495 p_info->bandwidth_min);
1496 p_info->bandwidth_min = 1;
1497 }
1498
1499 p_info->bandwidth_max = (p_shmem_info->config &
1500 FUNC_MF_CFG_MAX_BW_MASK) >>
1501 FUNC_MF_CFG_MAX_BW_SHIFT;
1502 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1503 DP_INFO(p_hwfn,
1504 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1505 p_info->bandwidth_max);
1506 p_info->bandwidth_max = 100;
1507 }
1508 }
1509
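/* Copy the public_func shmem section of the given PF into p_data, bounded
 * by the actual section size; returns the number of bytes read.
 */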
1510 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1511 struct ecore_ptt *p_ptt,
1512 struct public_func *p_data,
1513 int pfid)
1514 {
1515 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1516 PUBLIC_FUNC);
1517 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1518 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1519 u32 i, size;
1520
1521 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1522
1523 size = OSAL_MIN_T(u32, sizeof(*p_data),
1524 SECTION_SIZE(mfw_path_offsize));
1525 for (i = 0; i < size / sizeof(u32); i++)
1526 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1527 func_addr + (i << 2));
1528
1529 return size;
1530 }
1531 #if 0
/* This was introduced with FW 8.10.5.0; hopefully this is only temporary. */
1533 enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
1534 struct ecore_ptt *p_ptt,
1535 u8 *p_pf)
1536 {
1537 struct public_func shmem_info;
1538 int i;
1539
1540 /* Find first Ethernet interface in port */
1541 for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
1542 i += p_hwfn->p_dev->num_ports_in_engine) {
1543 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1544 MCP_PF_ID_BY_REL(p_hwfn, i));
1545
1546 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1547 continue;
1548
1549 if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
1550 FUNC_MF_CFG_PROTOCOL_ETHERNET) {
1551 *p_pf = (u8)i;
1552 return ECORE_SUCCESS;
1553 }
1554 }
1555
1556 /* This might actually be valid somewhere in the future but for now
1557 * it's highly unlikely.
1558 */
1559 DP_NOTICE(p_hwfn, false,
1560 "Failed to find on port an ethernet interface in MF_SI mode\n");
1561
1562 return ECORE_INVAL;
1563 }
1564 #endif
1565 static void
1566 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1567 {
1568 struct ecore_mcp_function_info *p_info;
1569 struct public_func shmem_info;
1570 u32 resp = 0, param = 0;
1571
1572 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1573 MCP_PF_ID(p_hwfn));
1574
1575 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1576
1577 p_info = &p_hwfn->mcp_info->func_info;
1578
1579 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1580
1581 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1582
1583 /* Acknowledge the MFW */
1584 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
&param);
1586 }
1587
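/* Handle an S-tag (outer VLAN) update from the MFW: refresh the cached
 * ovlan value, program the NIG LLH tag in MF_SD mode, notify the SP path
 * and the upper layer, and acknowledge the MFW.
 */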
1588 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1589 struct ecore_ptt *p_ptt)
1590 {
1591 struct public_func shmem_info;
1592 u32 resp = 0, param = 0;
1593
1594 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1595 MCP_PF_ID(p_hwfn));
1596
1597 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1598 FUNC_MF_CFG_OV_STAG_MASK;
1599 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1600 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1601 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1602 ecore_wr(p_hwfn, p_ptt,
1603 NIG_REG_LLH_FUNC_TAG_VALUE,
1604 p_hwfn->hw_info.ovlan);
1605 ecore_sp_pf_update_stag(p_hwfn);
1606 }
1607
1608 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1609
1610 /* Acknowledge the MFW */
1611 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
&resp, &param);
1613 }
1614
1615 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1616 struct ecore_ptt *p_ptt)
1617 {
1618 /* A single notification should be sent to upper driver in CMT mode */
1619 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1620 return;
1621
1622 DP_NOTICE(p_hwfn, false,
1623 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1624
1625 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1626 }
1627
1628 struct ecore_mdump_cmd_params {
1629 u32 cmd;
1630 void *p_data_src;
1631 u8 data_src_size;
1632 void *p_data_dst;
1633 u8 data_dst_size;
1634 u32 mcp_resp;
1635 };
1636
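/* Send an mdump sub-command to the MFW, passing optional input and output
 * buffers through the mailbox union data.
 */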
1637 static enum _ecore_status_t
1638 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1639 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1640 {
1641 struct ecore_mcp_mb_params mb_params;
1642 enum _ecore_status_t rc;
1643
1644 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1645 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1646 mb_params.param = p_mdump_cmd_params->cmd;
1647 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1648 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1649 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1650 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1651 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1652 if (rc != ECORE_SUCCESS)
1653 return rc;
1654
1655 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1656
1657 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1658 DP_INFO(p_hwfn,
1659 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1660 p_mdump_cmd_params->cmd);
1661 rc = ECORE_NOTIMPL;
1662 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1663 DP_INFO(p_hwfn,
1664 "The mdump command is not supported by the MFW\n");
1665 rc = ECORE_NOTIMPL;
1666 }
1667
1668 return rc;
1669 }
1670
1671 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1672 struct ecore_ptt *p_ptt)
1673 {
1674 struct ecore_mdump_cmd_params mdump_cmd_params;
1675
1676 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1677 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1678
1679 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1680 }
1681
1682 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1683 struct ecore_ptt *p_ptt,
1684 u32 epoch)
1685 {
1686 struct ecore_mdump_cmd_params mdump_cmd_params;
1687
1688 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1689 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1690 mdump_cmd_params.p_data_src = &epoch;
1691 mdump_cmd_params.data_src_size = sizeof(epoch);
1692
1693 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1694 }
1695
1696 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1697 struct ecore_ptt *p_ptt)
1698 {
1699 struct ecore_mdump_cmd_params mdump_cmd_params;
1700
1701 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1702 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1703
1704 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1705 }
1706
1707 static enum _ecore_status_t
1708 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1709 struct mdump_config_stc *p_mdump_config)
1710 {
1711 struct ecore_mdump_cmd_params mdump_cmd_params;
1712 enum _ecore_status_t rc;
1713
1714 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1715 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1716 mdump_cmd_params.p_data_dst = p_mdump_config;
1717 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1718
1719 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1720 if (rc != ECORE_SUCCESS)
1721 return rc;
1722
1723 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1724 DP_INFO(p_hwfn,
1725 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1726 mdump_cmd_params.mcp_resp);
1727 rc = ECORE_UNKNOWN_ERROR;
1728 }
1729
1730 return rc;
1731 }
1732
1733 enum _ecore_status_t
1734 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1735 struct ecore_mdump_info *p_mdump_info)
1736 {
1737 u32 addr, global_offsize, global_addr;
1738 struct mdump_config_stc mdump_config;
1739 enum _ecore_status_t rc;
1740
1741 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1742
1743 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1744 PUBLIC_GLOBAL);
1745 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1746 global_addr = SECTION_ADDR(global_offsize, 0);
1747 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1748 global_addr +
1749 OFFSETOF(struct public_global,
1750 mdump_reason));
1751
1752 if (p_mdump_info->reason) {
1753 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1754 if (rc != ECORE_SUCCESS)
1755 return rc;
1756
1757 p_mdump_info->version = mdump_config.version;
1758 p_mdump_info->config = mdump_config.config;
1759 p_mdump_info->epoch = mdump_config.epoc;
1760 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1761 p_mdump_info->valid_logs = mdump_config.valid_logs;
1762
1763 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1764 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1765 p_mdump_info->reason, p_mdump_info->version,
1766 p_mdump_info->config, p_mdump_info->epoch,
1767 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1768 } else {
1769 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1770 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1771 }
1772
1773 return ECORE_SUCCESS;
1774 }
1775
1776 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1777 struct ecore_ptt *p_ptt)
1778 {
1779 struct ecore_mdump_cmd_params mdump_cmd_params;
1780
1781 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1782 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1783
1784 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1785 }
1786
1787 enum _ecore_status_t
1788 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1789 struct ecore_mdump_retain_data *p_mdump_retain)
1790 {
1791 struct ecore_mdump_cmd_params mdump_cmd_params;
1792 struct mdump_retain_data_stc mfw_mdump_retain;
1793 enum _ecore_status_t rc;
1794
1795 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1796 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1797 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1798 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1799
1800 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1801 if (rc != ECORE_SUCCESS)
1802 return rc;
1803
1804 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1805 DP_INFO(p_hwfn,
1806 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1807 mdump_cmd_params.mcp_resp);
1808 return ECORE_UNKNOWN_ERROR;
1809 }
1810
1811 p_mdump_retain->valid = mfw_mdump_retain.valid;
1812 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1813 p_mdump_retain->pf = mfw_mdump_retain.pf;
1814 p_mdump_retain->status = mfw_mdump_retain.status;
1815
1816 return ECORE_SUCCESS;
1817 }
1818
1819 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1820 struct ecore_ptt *p_ptt)
1821 {
1822 struct ecore_mdump_cmd_params mdump_cmd_params;
1823
1824 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1825 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1826
1827 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1828 }
1829
1830 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1831 struct ecore_ptt *p_ptt)
1832 {
1833 struct ecore_mdump_retain_data mdump_retain;
1834 enum _ecore_status_t rc;
1835
1836 /* In CMT mode - no need for more than a single acknowledgement to the
1837 * MFW, and no more than a single notification to the upper driver.
1838 */
1839 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1840 return;
1841
1842 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1843 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1844 DP_NOTICE(p_hwfn, false,
1845 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1846 mdump_retain.epoch, mdump_retain.pf,
1847 mdump_retain.status);
1848 } else {
1849 DP_NOTICE(p_hwfn, false,
1850 "The MFW notified that a critical error occurred in the device\n");
1851 }
1852
1853 if (p_hwfn->p_dev->allow_mdump) {
1854 DP_NOTICE(p_hwfn, false,
1855 "Not acknowledging the notification to allow the MFW crash dump\n");
1856 return;
1857 }
1858
1859 DP_NOTICE(p_hwfn, false,
1860 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1861 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1862 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1863 }
1864
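/* Dispatches each changed MFW mailbox message to its handler, acknowledges
 * all the messages back to the MFW (in big-endian, as the MFW expects), and
 * then refreshes the shadow copy that is used to detect future changes.
 */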
1865 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1866 struct ecore_ptt *p_ptt)
1867 {
1868 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1869 enum _ecore_status_t rc = ECORE_SUCCESS;
1870 bool found = false;
1871 u16 i;
1872
1873 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1874
1875 /* Read Messages from MFW */
1876 ecore_mcp_read_mb(p_hwfn, p_ptt);
1877
1878 /* Compare current messages to old ones */
1879 for (i = 0; i < info->mfw_mb_length; i++) {
1880 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1881 continue;
1882
1883 found = true;
1884
1885 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1886 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1887 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1888
1889 switch (i) {
1890 case MFW_DRV_MSG_LINK_CHANGE:
1891 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1892 break;
1893 case MFW_DRV_MSG_VF_DISABLED:
1894 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1895 break;
1896 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1897 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1898 ECORE_DCBX_REMOTE_LLDP_MIB);
1899 break;
1900 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1901 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1902 ECORE_DCBX_REMOTE_MIB);
1903 break;
1904 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1905 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1906 ECORE_DCBX_OPERATIONAL_MIB);
1907 break;
1908 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1909 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1910 break;
1911 case MFW_DRV_MSG_ERROR_RECOVERY:
1912 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1913 break;
1914 case MFW_DRV_MSG_GET_LAN_STATS:
1915 case MFW_DRV_MSG_GET_FCOE_STATS:
1916 case MFW_DRV_MSG_GET_ISCSI_STATS:
1917 case MFW_DRV_MSG_GET_RDMA_STATS:
1918 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1919 break;
1920 case MFW_DRV_MSG_BW_UPDATE:
1921 ecore_mcp_update_bw(p_hwfn, p_ptt);
1922 break;
1923 case MFW_DRV_MSG_S_TAG_UPDATE:
1924 ecore_mcp_update_stag(p_hwfn, p_ptt);
1925 break;
1926 case MFW_DRV_MSG_FAILURE_DETECTED:
1927 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1928 break;
1929 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1930 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1931 break;
1932 case MFW_DRV_MSG_GET_TLV_REQ:
1933 OSAL_MFW_TLV_REQ(p_hwfn);
1934 break;
1935 default:
1936 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1937 rc = ECORE_INVAL;
1938 }
1939 }
1940
1941 /* ACK everything */
1942 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1943 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1944
		/* The MFW expects the answer in BE, so force the write in that format */
1946 ecore_wr(p_hwfn, p_ptt,
1947 info->mfw_mb_addr + sizeof(u32) +
1948 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1949 sizeof(u32) + i * sizeof(u32), val);
1950 }
1951
1952 if (!found) {
1953 DP_NOTICE(p_hwfn, false,
1954 "Received an MFW message indication but no new message!\n");
1955 rc = ECORE_INVAL;
1956 }
1957
1958 /* Copy the new mfw messages into the shadow */
1959 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1960
1961 return rc;
1962 }
1963
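/* Reads the MFW version (and optionally the running bundle id) from the
 * public global section of the shmem; VFs take it from the ACQUIRE response
 * instead. Illustrative decode sketch (assumes the conventional byte packing
 * of major.minor.rev.eng, from the most significant byte down):
 *
 *	u32 mfw_ver;
 *
 *	if (ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver,
 *				  OSAL_NULL) == ECORE_SUCCESS)
 *		DP_INFO(p_hwfn, "MFW %d.%d.%d.%d\n",
 *			(mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *			(mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
 */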
1964 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1965 struct ecore_ptt *p_ptt,
1966 u32 *p_mfw_ver,
1967 u32 *p_running_bundle_id)
1968 {
1969 u32 global_offsize;
1970
1971 #ifndef ASIC_ONLY
1972 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1973 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1974 return ECORE_SUCCESS;
1975 }
1976 #endif
1977
1978 if (IS_VF(p_hwfn->p_dev)) {
1979 if (p_hwfn->vf_iov_info) {
1980 struct pfvf_acquire_resp_tlv *p_resp;
1981
1982 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1983 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1984 return ECORE_SUCCESS;
1985 } else {
1986 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1987 "VF requested MFW version prior to ACQUIRE\n");
1988 return ECORE_INVAL;
1989 }
1990 }
1991
1992 global_offsize = ecore_rd(p_hwfn, p_ptt,
1993 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1994 PUBLIC_GLOBAL));
1995 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
1996 SECTION_ADDR(global_offsize, 0) +
1997 OFFSETOF(struct public_global, mfw_ver));
1998
1999 if (p_running_bundle_id != OSAL_NULL) {
2000 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2001 SECTION_ADDR(global_offsize, 0) +
2002 OFFSETOF(struct public_global,
2003 running_bundle_id));
2004 }
2005
2006 return ECORE_SUCCESS;
2007 }
2008
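/* The MBI version is not published in the driver mailbox; it is reached by
 * dereferencing the nvm_cfg address published in MISC_REG_GEN_PURP_CR0 and
 * reading nvm_cfg1_glob.mbi_version out of the MCP scratchpad.
 */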
2009 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2010 struct ecore_ptt *p_ptt,
2011 u32 *p_mbi_ver)
2012 {
2013 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2014
2015 #ifndef ASIC_ONLY
2016 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2017 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2018 return ECORE_SUCCESS;
2019 }
2020 #endif
2021
2022 if (IS_VF(p_hwfn->p_dev))
2023 return ECORE_INVAL;
2024
2025 /* Read the address of the nvm_cfg */
2026 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2027 if (!nvm_cfg_addr) {
2028 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2029 return ECORE_INVAL;
2030 }
2031
2032 /* Read the offset of nvm_cfg1 */
2033 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2034
2035 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2036 OFFSETOF(struct nvm_cfg1, glob) +
2037 OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2038 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2039 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2040 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2041 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2042
2043 return ECORE_SUCCESS;
2044 }
2045
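/* Device-level query: acquires a PTT window of its own, reads
 * public_port.media_type and releases the window. Returns ECORE_BUSY when
 * the MCP is not initialized or when no PTT window is available.
 */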
2046 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2047 u32 *p_media_type)
2048 {
2049 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2050 struct ecore_ptt *p_ptt;
2051
2052 /* TODO - Add support for VFs */
2053 if (IS_VF(p_dev))
2054 return ECORE_INVAL;
2055
2056 if (!ecore_mcp_is_init(p_hwfn)) {
2057 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2058 return ECORE_BUSY;
2059 }
2060
2061 *p_media_type = MEDIA_UNSPECIFIED;
2062
2063 p_ptt = ecore_ptt_acquire(p_hwfn);
2064 if (!p_ptt)
2065 return ECORE_BUSY;
2066
2067 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2068 OFFSETOF(struct public_port, media_type));
2069
2070 ecore_ptt_release(p_hwfn, p_ptt);
2071
2072 return ECORE_SUCCESS;
2073 }
2074
2075 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2076 static void
2077 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2078 enum ecore_pci_personality *p_proto)
2079 {
2080 /* There wasn't ever a legacy MFW that published iwarp.
2081 * So at this point, this is either plain l2 or RoCE.
2082 */
2083 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2084 &p_hwfn->hw_info.device_capabilities))
2085 *p_proto = ECORE_PCI_ETH_ROCE;
2086 else
2087 *p_proto = ECORE_PCI_ETH;
2088
2089 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2090 "According to Legacy capabilities, L2 personality is %08x\n",
2091 (u32) *p_proto);
2092 }
2093
2094 static enum _ecore_status_t
2095 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2096 struct ecore_ptt *p_ptt,
2097 enum ecore_pci_personality *p_proto)
2098 {
2099 u32 resp = 0, param = 0;
2100 enum _ecore_status_t rc;
2101
2102 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2104 if (rc != ECORE_SUCCESS)
2105 return rc;
2106 if (resp != FW_MSG_CODE_OK) {
2107 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2108 "MFW lacks support for command; Returns %08x\n",
2109 resp);
2110 return ECORE_INVAL;
2111 }
2112
2113 switch (param) {
2114 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2115 *p_proto = ECORE_PCI_ETH;
2116 break;
2117 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2118 *p_proto = ECORE_PCI_ETH_ROCE;
2119 break;
2120 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2121 *p_proto = ECORE_PCI_ETH_IWARP;
2122 break;
2123 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2124 *p_proto = ECORE_PCI_ETH_RDMA;
2125 break;
2126 default:
2127 DP_NOTICE(p_hwfn, true,
2128 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2129 param);
2130 return ECORE_INVAL;
2131 }
2132
2133 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2134 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2135 (u32) *p_proto, resp, param);
2136 return ECORE_SUCCESS;
2137 }
2138
2139 static enum _ecore_status_t
2140 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2141 struct public_func *p_info,
2142 struct ecore_ptt *p_ptt,
2143 enum ecore_pci_personality *p_proto)
2144 {
2145 enum _ecore_status_t rc = ECORE_SUCCESS;
2146
2147 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2148 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2149 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2150 ECORE_SUCCESS)
2151 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2152 break;
2153 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2154 *p_proto = ECORE_PCI_ISCSI;
2155 break;
2156 case FUNC_MF_CFG_PROTOCOL_FCOE:
2157 *p_proto = ECORE_PCI_FCOE;
2158 break;
2159 case FUNC_MF_CFG_PROTOCOL_ROCE:
2160 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2161 rc = ECORE_INVAL;
2162 break;
2163 default:
2164 rc = ECORE_INVAL;
2165 }
2166
2167 return rc;
2168 }
2169
2170 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2171 struct ecore_ptt *p_ptt)
2172 {
2173 struct ecore_mcp_function_info *info;
2174 struct public_func shmem_info;
2175
2176 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2177 MCP_PF_ID(p_hwfn));
2178 info = &p_hwfn->mcp_info->func_info;
2179
2180 info->pause_on_host = (shmem_info.config &
2181 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2182
2183 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2184 &info->protocol)) {
2185 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2186 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2187 return ECORE_INVAL;
2188 }
2189
2190 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2191
2192 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2193 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2194 info->mac[1] = (u8)(shmem_info.mac_upper);
2195 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2196 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2197 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2198 info->mac[5] = (u8)(shmem_info.mac_lower);
2199
2200 /* Store primary MAC for later possible WoL */
2201 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2202
2203 } else {
2204 /* TODO - are there protocols for which there's no MAC? */
2205 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2206 }
2207
2208 /* TODO - are these calculations true for BE machine? */
2209 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2210 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2211 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2212 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2213
2214 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2215
2216 info->mtu = (u16)shmem_info.mtu_size;
2217
2218 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2219 if (ecore_mcp_is_init(p_hwfn)) {
2220 u32 resp = 0, param = 0;
2221 enum _ecore_status_t rc;
2222
2223 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
				   DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2225 if (rc != ECORE_SUCCESS)
2226 return rc;
2227 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2228 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2229 }
2230 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2231
2232 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2233 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2234 info->pause_on_host, info->protocol,
2235 info->bandwidth_min, info->bandwidth_max,
2236 info->mac[0], info->mac[1], info->mac[2],
2237 info->mac[3], info->mac[4], info->mac[5],
2238 info->wwn_port, info->wwn_node, info->ovlan,
2239 (u8)p_hwfn->hw_info.b_wol_support);
2240
2241 return ECORE_SUCCESS;
2242 }
2243
2244 struct ecore_mcp_link_params
2245 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2246 {
2247 if (!p_hwfn || !p_hwfn->mcp_info)
2248 return OSAL_NULL;
2249 return &p_hwfn->mcp_info->link_input;
2250 }
2251
2252 struct ecore_mcp_link_state
2253 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2254 {
2255 if (!p_hwfn || !p_hwfn->mcp_info)
2256 return OSAL_NULL;
2257
2258 #ifndef ASIC_ONLY
2259 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2260 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2261 p_hwfn->mcp_info->link_output.link_up = true;
2262 }
2263 #endif
2264
2265 return &p_hwfn->mcp_info->link_output;
2266 }
2267
2268 struct ecore_mcp_link_capabilities
2269 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2270 {
2271 if (!p_hwfn || !p_hwfn->mcp_info)
2272 return OSAL_NULL;
2273 return &p_hwfn->mcp_info->link_capabilities;
2274 }
2275
2276 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2277 struct ecore_ptt *p_ptt)
2278 {
2279 u32 resp = 0, param = 0;
2280 enum _ecore_status_t rc;
2281
2282 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2283 DRV_MSG_CODE_NIG_DRAIN, 1000,
			   &resp, &param);
2285
2286 /* Wait for the drain to complete before returning */
2287 OSAL_MSLEEP(1020);
2288
2289 return rc;
2290 }
2291
2292 #ifndef LINUX_REMOVE
2293 const struct ecore_mcp_function_info
2294 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2295 {
2296 if (!p_hwfn || !p_hwfn->mcp_info)
2297 return OSAL_NULL;
2298 return &p_hwfn->mcp_info->func_info;
2299 }
2300 #endif
2301
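/* Dispatches an NVM request to the proper mailbox flavor - a plain command,
 * a command with a read buffer, or a command with a write buffer - based on
 * params->type.
 */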
2302 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
2303 struct ecore_ptt *p_ptt,
2304 struct ecore_mcp_nvm_params *params)
2305 {
2306 enum _ecore_status_t rc;
2307
2308 switch (params->type) {
2309 case ECORE_MCP_NVM_RD:
2310 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2311 params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
2314 params->nvm_rd.buf_size,
2315 params->nvm_rd.buf);
2316 break;
2317 case ECORE_MCP_CMD:
2318 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2319 params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
2322 break;
2323 case ECORE_MCP_NVM_WR:
2324 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2325 params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
2328 params->nvm_wr.buf_size,
2329 params->nvm_wr.buf);
2330 break;
2331 default:
2332 rc = ECORE_NOTIMPL;
2333 break;
2334 }
2335 return rc;
2336 }
2337
2338 #ifndef LINUX_REMOVE
2339 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2340 struct ecore_ptt *p_ptt,
2341 u32 personalities)
2342 {
2343 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2344 struct public_func shmem_info;
2345 int i, count = 0, num_pfs;
2346
2347 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2348
2349 for (i = 0; i < num_pfs; i++) {
2350 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2351 MCP_PF_ID_BY_REL(p_hwfn, i));
2352 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2353 continue;
2354
2355 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2356 &protocol) !=
2357 ECORE_SUCCESS)
2358 continue;
2359
2360 if ((1 << ((u32)protocol)) & personalities)
2361 count++;
2362 }
2363
2364 return count;
2365 }
2366 #endif
2367
2368 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2369 struct ecore_ptt *p_ptt,
2370 u32 *p_flash_size)
2371 {
2372 u32 flash_size;
2373
2374 #ifndef ASIC_ONLY
2375 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2376 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2377 return ECORE_INVAL;
2378 }
2379 #endif
2380
2381 if (IS_VF(p_hwfn->p_dev))
2382 return ECORE_INVAL;
2383
2384 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2385 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2386 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2387 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2388
2389 *p_flash_size = flash_size;
2390
2391 return ECORE_SUCCESS;
2392 }
2393
2394 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2395 struct ecore_ptt *p_ptt)
2396 {
2397 struct ecore_dev *p_dev = p_hwfn->p_dev;
2398
2399 if (p_dev->recov_in_prog) {
2400 DP_NOTICE(p_hwfn, false,
2401 "Avoid triggering a recovery since such a process is already in progress\n");
2402 return ECORE_AGAIN;
2403 }
2404
2405 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2406 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2407
2408 return ECORE_SUCCESS;
2409 }
2410
2411 static enum _ecore_status_t
2412 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2413 struct ecore_ptt *p_ptt,
2414 u8 vf_id, u8 num)
2415 {
2416 u32 resp = 0, param = 0, rc_param = 0;
2417 enum _ecore_status_t rc;
2418
	/* Only the leader hwfn may configure MSI-X, and CMT must be taken
	 * into account.
	 */
2420 if (!IS_LEAD_HWFN(p_hwfn))
2421 return ECORE_SUCCESS;
2422 num *= p_hwfn->p_dev->num_hwfns;
2423
2424 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2425 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2426 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2427 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2428
2429 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2430 &resp, &rc_param);
2431
2432 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2433 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2434 vf_id);
2435 rc = ECORE_INVAL;
2436 } else {
2437 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2438 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2439 num, vf_id);
2440 }
2441
2442 return rc;
2443 }
2444
2445 static enum _ecore_status_t
2446 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2447 struct ecore_ptt *p_ptt,
2448 u8 num)
2449 {
2450 u32 resp = 0, param = num, rc_param = 0;
2451 enum _ecore_status_t rc;
2452
2453 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2454 param, &resp, &rc_param);
2455
2456 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2457 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2458 rc = ECORE_INVAL;
2459 } else {
2460 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2461 "Requested 0x%02x MSI-x interrupts for VFs\n",
2462 num);
2463 }
2464
2465 return rc;
2466 }
2467
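/* BB configures MSI-X per VF (and scales the request by the number of hwfns
 * under CMT), whereas AH configures a single value for all VFs; pick the
 * flavor based on the device type.
 */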
2468 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2469 struct ecore_ptt *p_ptt,
2470 u8 vf_id, u8 num)
2471 {
2472 if (ECORE_IS_BB(p_hwfn->p_dev))
2473 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2474 else
2475 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2476 }
2477
2478 enum _ecore_status_t
2479 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2480 struct ecore_mcp_drv_version *p_ver)
2481 {
2482 struct ecore_mcp_mb_params mb_params;
2483 struct drv_version_stc drv_version;
2484 u32 num_words, i;
2485 void *p_name;
2486 OSAL_BE32 val;
2487 enum _ecore_status_t rc;
2488
2489 #ifndef ASIC_ONLY
2490 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2491 return ECORE_SUCCESS;
2492 #endif
2493
2494 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2495 drv_version.version = p_ver->version;
2496 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2497 for (i = 0; i < num_words; i++) {
2498 /* The driver name is expected to be in a big-endian format */
2499 p_name = &p_ver->name[i * sizeof(u32)];
2500 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2501 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2502 }
2503
2504 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2505 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2506 mb_params.p_data_src = &drv_version;
2507 mb_params.data_src_size = sizeof(drv_version);
2508 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2509 if (rc != ECORE_SUCCESS)
2510 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2511
2512 return rc;
2513 }
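
/* Illustrative caller sketch (hypothetical version/name values; the routine
 * above converts the name to the big-endian layout the MFW expects):
 *
 *	struct ecore_mcp_drv_version drv_ver;
 *
 *	OSAL_MEM_ZERO(&drv_ver, sizeof(drv_ver));
 *	drv_ver.version = 0x08070605;
 *	OSAL_MEMCPY(drv_ver.name, "example-drv", 12);
 *	(void) ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
 */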
2514
/* Wait up to 100 msec for the MCP to halt */
2516 #define ECORE_MCP_HALT_SLEEP_MS 10
2517 #define ECORE_MCP_HALT_MAX_RETRIES 10
2518
2519 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2520 struct ecore_ptt *p_ptt)
2521 {
2522 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2523 enum _ecore_status_t rc;
2524
2525 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2526 ¶m);
2527 if (rc != ECORE_SUCCESS) {
2528 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2529 return rc;
2530 }
2531
2532 do {
2533 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2534 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2535 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2536 break;
2537 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2538
2539 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2540 DP_NOTICE(p_hwfn, false,
2541 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2542 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2543 return ECORE_BUSY;
2544 }
2545
2546 return ECORE_SUCCESS;
2547 }
2548
2549 #define ECORE_MCP_RESUME_SLEEP_MS 10
2550
2551 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2552 struct ecore_ptt *p_ptt)
2553 {
2554 u32 cpu_mode, cpu_state;
2555
2556 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2557
2558 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2559 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2560 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2561
2562 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2563 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2564
2565 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2566 DP_NOTICE(p_hwfn, false,
2567 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2568 cpu_mode, cpu_state);
2569 return ECORE_BUSY;
2570 }
2571
2572 return ECORE_SUCCESS;
2573 }
2574
2575 enum _ecore_status_t
2576 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2577 struct ecore_ptt *p_ptt,
2578 enum ecore_ov_client client)
2579 {
2580 enum _ecore_status_t rc;
2581 u32 resp = 0, param = 0;
2582 u32 drv_mb_param;
2583
2584 switch (client) {
2585 case ECORE_OV_CLIENT_DRV:
2586 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2587 break;
2588 case ECORE_OV_CLIENT_USER:
2589 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2590 break;
2591 case ECORE_OV_CLIENT_VENDOR_SPEC:
2592 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2593 break;
2594 default:
2595 DP_NOTICE(p_hwfn, true,
2596 "Invalid client type %d\n", client);
2597 return ECORE_INVAL;
2598 }
2599
2600 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
2602 if (rc != ECORE_SUCCESS)
2603 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2604
2605 return rc;
2606 }
2607
2608 enum _ecore_status_t
2609 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2610 struct ecore_ptt *p_ptt,
2611 enum ecore_ov_driver_state drv_state)
2612 {
2613 enum _ecore_status_t rc;
2614 u32 resp = 0, param = 0;
2615 u32 drv_mb_param;
2616
2617 switch (drv_state) {
2618 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2619 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2620 break;
2621 case ECORE_OV_DRIVER_STATE_DISABLED:
2622 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2623 break;
2624 case ECORE_OV_DRIVER_STATE_ACTIVE:
2625 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2626 break;
2627 default:
2628 DP_NOTICE(p_hwfn, true,
2629 "Invalid driver state %d\n", drv_state);
2630 return ECORE_INVAL;
2631 }
2632
2633 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_mb_param, &resp, &param);
2635 if (rc != ECORE_SUCCESS)
2636 DP_ERR(p_hwfn, "Failed to send driver state\n");
2637
2638 return rc;
2639 }
2640
2641 enum _ecore_status_t
2642 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2643 struct ecore_fc_npiv_tbl *p_table)
2644 {
2645 enum _ecore_status_t rc = ECORE_SUCCESS;
2646 struct dci_fc_npiv_tbl *p_npiv_table;
2647 u8 *p_buf = OSAL_NULL;
2648 u32 addr, size, i;
2649
2650 p_table->num_wwpn = 0;
2651 p_table->num_wwnn = 0;
2652 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2653 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
2654 if (addr == NPIV_TBL_INVALID_ADDR) {
2655 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2656 return rc;
2657 }
2658
2659 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2660 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
2661 if (!size) {
2662 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2663 return rc;
2664 }
2665
2666 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2667 if (!p_buf) {
2668 DP_ERR(p_hwfn, "Buffer allocation failed\n");
2669 return ECORE_NOMEM;
2670 }
2671
2672 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2673 if (rc != ECORE_SUCCESS) {
2674 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2675 return rc;
2676 }
2677
2678 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2679 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2680 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2681 for (i = 0; i < p_table->num_wwpn; i++) {
2682 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2683 ECORE_WWN_SIZE);
2684 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2685 ECORE_WWN_SIZE);
2686 }
2687
2688 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2689
2690 return ECORE_SUCCESS;
2691 }
2692
2693 enum _ecore_status_t
2694 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2695 u16 mtu)
2696 {
2697 enum _ecore_status_t rc;
2698 u32 resp = 0, param = 0;
2699 u32 drv_mb_param;
2700
2701 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2702 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			   drv_mb_param, &resp, &param);
2704 if (rc != ECORE_SUCCESS)
2705 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2706
2707 return rc;
2708 }
2709
2710 enum _ecore_status_t
2711 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2712 u8 *mac)
2713 {
2714 struct ecore_mcp_mb_params mb_params;
2715 enum _ecore_status_t rc;
2716 u32 mfw_mac[2];
2717
2718 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2719 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2720 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2721 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2722 mb_params.param |= MCP_PF_ID(p_hwfn);
2723
	/* The MCP is big-endian, and on LE platforms PCI would swap SHMEM
	 * accesses at 32-bit granularity. So the MAC has to be set in native
	 * order [and not byte order]; otherwise it would be read incorrectly
	 * by the MFW after the swap.
	 */
2729 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2730 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2731
2732 mb_params.p_data_src = (u8 *)mfw_mac;
2733 mb_params.data_src_size = 8;
2734 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2735 if (rc != ECORE_SUCCESS)
2736 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2737
2738 /* Store primary MAC for later possible WoL */
2739 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2740
2741 return rc;
2742 }
2743
2744 enum _ecore_status_t
2745 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2746 enum ecore_ov_wol wol)
2747 {
2748 enum _ecore_status_t rc;
2749 u32 resp = 0, param = 0;
2750 u32 drv_mb_param;
2751
2752 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2753 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2754 "Can't change WoL configuration when WoL isn't supported\n");
2755 return ECORE_INVAL;
2756 }
2757
2758 switch (wol) {
2759 case ECORE_OV_WOL_DEFAULT:
2760 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2761 break;
2762 case ECORE_OV_WOL_DISABLED:
2763 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2764 break;
2765 case ECORE_OV_WOL_ENABLED:
2766 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2767 break;
2768 default:
2769 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2770 return ECORE_INVAL;
2771 }
2772
2773 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			   drv_mb_param, &resp, &param);
2775 if (rc != ECORE_SUCCESS)
2776 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2777
2778 /* Store the WoL update for a future unload */
2779 p_hwfn->p_dev->wol_config = (u8)wol;
2780
2781 return rc;
2782 }
2783
2784 enum _ecore_status_t
2785 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2786 enum ecore_ov_eswitch eswitch)
2787 {
2788 enum _ecore_status_t rc;
2789 u32 resp = 0, param = 0;
2790 u32 drv_mb_param;
2791
2792 switch (eswitch) {
2793 case ECORE_OV_ESWITCH_NONE:
2794 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2795 break;
2796 case ECORE_OV_ESWITCH_VEB:
2797 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2798 break;
2799 case ECORE_OV_ESWITCH_VEPA:
2800 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2801 break;
2802 default:
2803 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2804 return ECORE_INVAL;
2805 }
2806
2807 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			   drv_mb_param, &resp, &param);
2809 if (rc != ECORE_SUCCESS)
2810 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2811
2812 return rc;
2813 }
2814
2815 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2816 struct ecore_ptt *p_ptt,
2817 enum ecore_led_mode mode)
2818 {
2819 u32 resp = 0, param = 0, drv_mb_param;
2820 enum _ecore_status_t rc;
2821
2822 switch (mode) {
2823 case ECORE_LED_MODE_ON:
2824 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2825 break;
2826 case ECORE_LED_MODE_OFF:
2827 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2828 break;
2829 case ECORE_LED_MODE_RESTORE:
2830 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2831 break;
2832 default:
2833 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2834 return ECORE_INVAL;
2835 }
2836
2837 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
2839 if (rc != ECORE_SUCCESS)
2840 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2841
2842 return rc;
2843 }
2844
2845 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2846 struct ecore_ptt *p_ptt,
2847 u32 mask_parities)
2848 {
2849 enum _ecore_status_t rc;
2850 u32 resp = 0, param = 0;
2851
2852 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);
2854
2855 if (rc != ECORE_SUCCESS) {
2856 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2857 } else if (resp != FW_MSG_CODE_OK) {
2858 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2859 rc = ECORE_INVAL;
2860 }
2861
2862 return rc;
2863 }
2864
2865 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2866 u8 *p_buf, u32 len)
2867 {
2868 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2869 u32 bytes_left, offset, bytes_to_copy, buf_size;
2870 struct ecore_mcp_nvm_params params;
2871 struct ecore_ptt *p_ptt;
2872 enum _ecore_status_t rc = ECORE_SUCCESS;
2873
2874 p_ptt = ecore_ptt_acquire(p_hwfn);
2875 if (!p_ptt)
2876 return ECORE_BUSY;
2877
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2879 bytes_left = len;
2880 offset = 0;
2881 params.type = ECORE_MCP_NVM_RD;
2882 params.nvm_rd.buf_size = &buf_size;
2883 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2884 while (bytes_left > 0) {
2885 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2886 MCP_DRV_NVM_BUF_LEN);
2887 params.nvm_common.offset = (addr + offset) |
2888 (bytes_to_copy <<
2889 DRV_MB_PARAM_NVM_LEN_SHIFT);
2890 params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2892 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2893 FW_MSG_CODE_NVM_OK)) {
2894 DP_NOTICE(p_dev, false, "MCP command rc = %d\n",
2895 rc);
2896 break;
2897 }
2898
		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent CPU
		 * hogging.
		 */
2902 if (bytes_left % 0x1000 <
2903 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2904 OSAL_MSLEEP(1);
2905
2906 offset += *params.nvm_rd.buf_size;
2907 bytes_left -= *params.nvm_rd.buf_size;
2908 }
2909
2910 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2911 ecore_ptt_release(p_hwfn, p_ptt);
2912
2913 return rc;
2914 }
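
/* Illustrative caller sketch (hypothetical flash offset and length; the
 * routine above chunks the read into MCP_DRV_NVM_BUF_LEN-sized mailbox
 * commands internally):
 *
 *	u8 buf[256];
 *
 *	if (ecore_mcp_nvm_read(p_dev, 0x0, buf,
 *			       sizeof(buf)) != ECORE_SUCCESS)
 *		DP_NOTICE(p_dev, false, "NVM read failed\n");
 */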
2915
2916 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2917 u32 addr, u8 *p_buf, u32 len)
2918 {
2919 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2920 struct ecore_mcp_nvm_params params;
2921 struct ecore_ptt *p_ptt;
2922 enum _ecore_status_t rc;
2923
2924 p_ptt = ecore_ptt_acquire(p_hwfn);
2925 if (!p_ptt)
2926 return ECORE_BUSY;
2927
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2929 params.type = ECORE_MCP_NVM_RD;
2930 params.nvm_rd.buf_size = &len;
2931 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2932 DRV_MSG_CODE_PHY_CORE_READ :
2933 DRV_MSG_CODE_PHY_RAW_READ;
2934 params.nvm_common.offset = addr;
2935 params.nvm_rd.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2937 if (rc != ECORE_SUCCESS)
2938 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2939
2940 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2941 ecore_ptt_release(p_hwfn, p_ptt);
2942
2943 return rc;
2944 }
2945
2946 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2947 {
2948 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2949 struct ecore_mcp_nvm_params params;
2950 struct ecore_ptt *p_ptt;
2951
2952 p_ptt = ecore_ptt_acquire(p_hwfn);
2953 if (!p_ptt)
2954 return ECORE_BUSY;
2955
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2957 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2958 ecore_ptt_release(p_hwfn, p_ptt);
2959
2960 return ECORE_SUCCESS;
2961 }
2962
2963 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
2964 u32 addr)
2965 {
2966 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2967 struct ecore_mcp_nvm_params params;
2968 struct ecore_ptt *p_ptt;
2969 enum _ecore_status_t rc;
2970
2971 p_ptt = ecore_ptt_acquire(p_hwfn);
2972 if (!p_ptt)
2973 return ECORE_BUSY;
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2979 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2980 ecore_ptt_release(p_hwfn, p_ptt);
2981
2982 return rc;
2983 }
2984
2985 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2986 u32 addr)
2987 {
2988 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2989 struct ecore_mcp_nvm_params params;
2990 struct ecore_ptt *p_ptt;
2991 enum _ecore_status_t rc;
2992
2993 p_ptt = ecore_ptt_acquire(p_hwfn);
2994 if (!p_ptt)
2995 return ECORE_BUSY;
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3001 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3002 ecore_ptt_release(p_hwfn, p_ptt);
3003
3004 return rc;
3005 }
3006
/* rc receives ECORE_INVAL as its default value, since the while loop
 * is never entered when len is 0.
 */
3010 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3011 u32 addr, u8 *p_buf, u32 len)
3012 {
3013 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3014 enum _ecore_status_t rc = ECORE_INVAL;
3015 struct ecore_mcp_nvm_params params;
3016 struct ecore_ptt *p_ptt;
3017 u32 buf_idx, buf_size;
3018
3019 p_ptt = ecore_ptt_acquire(p_hwfn);
3020 if (!p_ptt)
3021 return ECORE_BUSY;
3022
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3024 params.type = ECORE_MCP_NVM_WR;
3025 switch (cmd) {
3026 case ECORE_PUT_FILE_DATA:
3027 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3028 break;
3029 case ECORE_NVM_WRITE_NVRAM:
3030 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3031 break;
3032 case ECORE_EXT_PHY_FW_UPGRADE:
3033 params.nvm_common.cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3034 break;
3035 default:
3036 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3037 cmd);
3038 return ECORE_INVAL;
3039 }
3040
3041 buf_idx = 0;
3042 while (buf_idx < len) {
3043 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3044 MCP_DRV_NVM_BUF_LEN);
3045 params.nvm_common.offset = ((buf_size <<
3046 DRV_MB_PARAM_NVM_LEN_SHIFT)
3047 | addr) + buf_idx;
3048 params.nvm_wr.buf_size = buf_size;
3049 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3051 if (rc != ECORE_SUCCESS ||
3052 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
3053 (params.nvm_common.resp !=
3054 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
3055 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3056
		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent CPU
		 * hogging.
		 */
3060 if (buf_idx % 0x1000 >
3061 (buf_idx + buf_size) % 0x1000)
3062 OSAL_MSLEEP(1);
3063
3064 buf_idx += buf_size;
3065 }
3066
3067 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3068 ecore_ptt_release(p_hwfn, p_ptt);
3069
3070 return rc;
3071 }
3072
3073 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3074 u32 addr, u8 *p_buf, u32 len)
3075 {
3076 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3077 struct ecore_mcp_nvm_params params;
3078 struct ecore_ptt *p_ptt;
3079 enum _ecore_status_t rc;
3080
3081 p_ptt = ecore_ptt_acquire(p_hwfn);
3082 if (!p_ptt)
3083 return ECORE_BUSY;
3084
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3086 params.type = ECORE_MCP_NVM_WR;
3087 params.nvm_wr.buf_size = len;
3088 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
3089 DRV_MSG_CODE_PHY_CORE_WRITE :
3090 DRV_MSG_CODE_PHY_RAW_WRITE;
3091 params.nvm_common.offset = addr;
3092 params.nvm_wr.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3094 if (rc != ECORE_SUCCESS)
3095 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3096 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3097 ecore_ptt_release(p_hwfn, p_ptt);
3098
3099 return rc;
3100 }
3101
3102 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3103 u32 addr)
3104 {
3105 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3106 struct ecore_mcp_nvm_params params;
3107 struct ecore_ptt *p_ptt;
3108 enum _ecore_status_t rc;
3109
3110 p_ptt = ecore_ptt_acquire(p_hwfn);
3111 if (!p_ptt)
3112 return ECORE_BUSY;
3113
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3119 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3120 ecore_ptt_release(p_hwfn, p_ptt);
3121
3122 return rc;
3123 }
3124
3125 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3126 struct ecore_ptt *p_ptt,
3127 u32 port, u32 addr, u32 offset,
3128 u32 len, u8 *p_buf)
3129 {
3130 struct ecore_mcp_nvm_params params;
3131 enum _ecore_status_t rc;
3132 u32 bytes_left, bytes_to_copy, buf_size;
3133
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3135 params.nvm_common.offset =
3136 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3137 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3138 addr = offset;
3139 offset = 0;
3140 bytes_left = len;
3141 params.type = ECORE_MCP_NVM_RD;
3142 params.nvm_rd.buf_size = &buf_size;
3143 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3144 while (bytes_left > 0) {
3145 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3146 MAX_I2C_TRANSACTION_SIZE);
3147 params.nvm_rd.buf = (u32 *)(p_buf + offset);
3148 params.nvm_common.offset &=
3149 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3150 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3151 params.nvm_common.offset |=
3152 ((addr + offset) <<
3153 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3154 params.nvm_common.offset |=
3155 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3157 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3158 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3159 return ECORE_NODEV;
3160 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3161 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3162 return ECORE_UNKNOWN_ERROR;
3163
3164 offset += *params.nvm_rd.buf_size;
3165 bytes_left -= *params.nvm_rd.buf_size;
3166 }
3167
3168 return ECORE_SUCCESS;
3169 }
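
/* Illustrative caller sketch: read the first bytes of a transceiver EEPROM
 * (0xA0 is the conventional SFP EEPROM I2C address; the port and offset
 * values are hypothetical):
 *
 *	u8 eeprom[8];
 *
 *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn), 0xA0,
 *				    0, sizeof(eeprom), eeprom);
 */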
3170
3171 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3172 struct ecore_ptt *p_ptt,
3173 u32 port, u32 addr, u32 offset,
3174 u32 len, u8 *p_buf)
3175 {
3176 struct ecore_mcp_nvm_params params;
3177 enum _ecore_status_t rc;
3178 u32 buf_idx, buf_size;
3179
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3181 params.nvm_common.offset =
3182 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3183 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3184 params.type = ECORE_MCP_NVM_WR;
3185 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3186 buf_idx = 0;
3187 while (buf_idx < len) {
3188 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3189 MAX_I2C_TRANSACTION_SIZE);
3190 params.nvm_common.offset &=
3191 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3192 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3193 params.nvm_common.offset |=
3194 ((offset + buf_idx) <<
3195 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3196 params.nvm_common.offset |=
3197 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3198 params.nvm_wr.buf_size = buf_size;
3199 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3201 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3202 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3203 return ECORE_NODEV;
3204 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3205 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3206 return ECORE_UNKNOWN_ERROR;
3207
3208 buf_idx += buf_size;
3209 }
3210
3211 return ECORE_SUCCESS;
3212 }
3213
3214 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3215 struct ecore_ptt *p_ptt,
3216 u16 gpio, u32 *gpio_val)
3217 {
3218 enum _ecore_status_t rc = ECORE_SUCCESS;
3219 u32 drv_mb_param = 0, rsp;
3220
3221 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
3222
3223 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3224 drv_mb_param, &rsp, gpio_val);
3225
3226 if (rc != ECORE_SUCCESS)
3227 return rc;
3228
3229 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3230 return ECORE_UNKNOWN_ERROR;
3231
3232 return ECORE_SUCCESS;
3233 }
3234
3235 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3236 struct ecore_ptt *p_ptt,
3237 u16 gpio, u16 gpio_val)
3238 {
3239 enum _ecore_status_t rc = ECORE_SUCCESS;
3240 u32 drv_mb_param = 0, param, rsp;
3241
3242 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
3243 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
3244
3245 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);
3247
3248 if (rc != ECORE_SUCCESS)
3249 return rc;
3250
3251 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3252 return ECORE_UNKNOWN_ERROR;
3253
3254 return ECORE_SUCCESS;
3255 }
3256
3257 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3258 struct ecore_ptt *p_ptt,
3259 u16 gpio, u32 *gpio_direction,
3260 u32 *gpio_ctrl)
3261 {
3262 u32 drv_mb_param = 0, rsp, val = 0;
3263 enum _ecore_status_t rc = ECORE_SUCCESS;
3264
3265 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
3266
3267 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3268 drv_mb_param, &rsp, &val);
3269 if (rc != ECORE_SUCCESS)
3270 return rc;
3271
3272 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3273 DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
3274 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3275 DRV_MB_PARAM_GPIO_CTRL_SHIFT;
3276
3277 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3278 return ECORE_UNKNOWN_ERROR;
3279
3280 return ECORE_SUCCESS;
3281 }
3282
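/* The BIST helpers below all wrap DRV_MSG_CODE_BIST_TEST, with the specific
 * test selected via DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT in the mailbox
 * parameter.
 */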
3283 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3284 struct ecore_ptt *p_ptt)
3285 {
3286 u32 drv_mb_param = 0, rsp, param;
3287 enum _ecore_status_t rc = ECORE_SUCCESS;
3288
3289 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3290 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3291
3292 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
3294
3295 if (rc != ECORE_SUCCESS)
3296 return rc;
3297
3298 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3299 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3300 rc = ECORE_UNKNOWN_ERROR;
3301
3302 return rc;
3303 }
3304
3305 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3306 struct ecore_ptt *p_ptt)
3307 {
3308 u32 drv_mb_param, rsp, param;
3309 enum _ecore_status_t rc = ECORE_SUCCESS;
3310
3311 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3312 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3313
3314 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);
3316
3317 if (rc != ECORE_SUCCESS)
3318 return rc;
3319
3320 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3321 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3322 rc = ECORE_UNKNOWN_ERROR;
3323
3324 return rc;
3325 }
3326
3327 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3328 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3329 {
3330 u32 drv_mb_param = 0, rsp;
3331 enum _ecore_status_t rc = ECORE_SUCCESS;
3332
3333 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3334 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3335
3336 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3337 drv_mb_param, &rsp, num_images);
3338
3339 if (rc != ECORE_SUCCESS)
3340 return rc;
3341
	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3343 rc = ECORE_UNKNOWN_ERROR;
3344
3345 return rc;
3346 }
3347
3348 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3349 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3350 struct bist_nvm_image_att *p_image_att, u32 image_index)
3351 {
3352 struct ecore_mcp_nvm_params params;
3353 enum _ecore_status_t rc;
3354 u32 buf_size;
3355
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3357 params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3358 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3359 params.nvm_common.offset |= (image_index <<
3360 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
3361
3362 params.type = ECORE_MCP_NVM_RD;
3363 params.nvm_rd.buf_size = &buf_size;
3364 params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
3365 params.nvm_rd.buf = (u32 *)p_image_att;
3366
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3368 if (rc != ECORE_SUCCESS)
3369 return rc;
3370
3371 if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3372 (p_image_att->return_code != 1))
3373 rc = ECORE_UNKNOWN_ERROR;
3374
3375 return rc;
3376 }
3377
3378 enum _ecore_status_t
3379 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3380 enum ecore_nvm_images image_id,
3381 struct ecore_nvm_image_att *p_image_att)
3382 {
3383 struct bist_nvm_image_att mfw_image_att;
3384 enum nvm_image_type type;
3385 u32 num_images, i;
3386 enum _ecore_status_t rc;
3387
3388 /* Translate image_id into MFW definitions */
3389 switch (image_id) {
3390 case ECORE_NVM_IMAGE_ISCSI_CFG:
3391 type = NVM_TYPE_ISCSI_CFG;
3392 break;
3393 case ECORE_NVM_IMAGE_FCOE_CFG:
3394 type = NVM_TYPE_FCOE_CFG;
3395 break;
3396 case ECORE_NVM_IMAGE_MDUMP:
3397 type = NVM_TYPE_MDUMP;
3398 break;
3399 default:
3400 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3401 image_id);
3402 return ECORE_INVAL;
3403 }
3404
3405 /* Learn number of images, then traverse and see if one fits */
3406 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3407 if (rc != ECORE_SUCCESS || !num_images)
3408 return ECORE_INVAL;
3409
3410 for (i = 0; i < num_images; i++) {
3411 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3412 &mfw_image_att, i);
3413 if (rc != ECORE_SUCCESS)
3414 return rc;
3415
3416 if (type == mfw_image_att.image_type)
3417 break;
3418 }
3419 if (i == num_images) {
3420 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3421 "Failed to find nvram image of type %08x\n",
3422 image_id);
3423 return ECORE_INVAL;
3424 }
3425
3426 p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3427 p_image_att->length = mfw_image_att.len;
3428
3429 return ECORE_SUCCESS;
3430 }
3431
3432 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3433 struct ecore_ptt *p_ptt,
3434 enum ecore_nvm_images image_id,
3435 u8 *p_buffer, u32 buffer_len)
3436 {
3437 struct ecore_nvm_image_att image_att;
3438 enum _ecore_status_t rc;
3439
3440 OSAL_MEM_ZERO(p_buffer, buffer_len);
3441
3442 rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3443 if (rc != ECORE_SUCCESS)
3444 return rc;
3445
3446 /* Validate sizes - both the image's and the supplied buffer's */
3447 if (image_att.length <= 4) {
3448 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3449 "Image [%d] is too small - only %d bytes\n",
3450 image_id, image_att.length);
3451 return ECORE_INVAL;
3452 }
3453
	/* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
3455 image_att.length -= 4;
3456
3457 if (image_att.length > buffer_len) {
3458 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3459 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3460 image_id, image_att.length, buffer_len);
3461 return ECORE_NOMEM;
3462 }
3463
3464 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3465 p_buffer, image_att.length);
3466 }
3467
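/* Queries the temperature sensors reported by the MFW. Each 32-bit sensor
 * word packs the sensor location, the high/critical thresholds and the
 * current temperature, which are unpacked into the caller's structure.
 */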
3468 enum _ecore_status_t
3469 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3470 struct ecore_ptt *p_ptt,
3471 struct ecore_temperature_info *p_temp_info)
3472 {
3473 struct ecore_temperature_sensor *p_temp_sensor;
3474 struct temperature_status_stc mfw_temp_info;
3475 struct ecore_mcp_mb_params mb_params;
3476 u32 val;
3477 enum _ecore_status_t rc;
3478 u8 i;
3479
3480 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3481 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3482 mb_params.p_data_dst = &mfw_temp_info;
3483 mb_params.data_dst_size = sizeof(mfw_temp_info);
3484 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3485 if (rc != ECORE_SUCCESS)
3486 return rc;
3487
3488 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3489 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3490 ECORE_MAX_NUM_OF_SENSORS);
3491 for (i = 0; i < p_temp_info->num_sensors; i++) {
3492 val = mfw_temp_info.sensor[i];
3493 p_temp_sensor = &p_temp_info->sensors[i];
3494 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3495 SENSOR_LOCATION_SHIFT;
3496 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3497 THRESHOLD_HIGH_SHIFT;
3498 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3499 CRITICAL_TEMPERATURE_SHIFT;
3500 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3501 CURRENT_TEMP_SHIFT;
3502 }
3503
3504 return ECORE_SUCCESS;
3505 }
3506
3507 enum _ecore_status_t ecore_mcp_get_mba_versions(
3508 struct ecore_hwfn *p_hwfn,
3509 struct ecore_ptt *p_ptt,
3510 struct ecore_mba_vers *p_mba_vers)
3511 {
3512 struct ecore_mcp_nvm_params params;
3513 enum _ecore_status_t rc;
3514 u32 buf_size;
3515
	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &(p_mba_vers->mba_vers[0]);
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3523
3524 if (rc != ECORE_SUCCESS)
3525 return rc;
3526
3527 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3528 FW_MSG_CODE_NVM_OK)
3529 rc = ECORE_UNKNOWN_ERROR;
3530
3531 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3532 rc = ECORE_UNKNOWN_ERROR;
3533
3534 return rc;
3535 }
3536
3537 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3538 struct ecore_ptt *p_ptt,
3539 u64 *num_events)
3540 {
3541 u32 rsp;
3542
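	/*
	 * The mailbox returns a single 32-bit param, so only the first four
	 * bytes of *num_events are written through the cast below; callers
	 * should zero-initialize the counter beforehand.
	 */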
3543 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3544 0, &rsp, (u32 *)num_events);
3545 }
3546
3547 static enum resource_id_enum
3548 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3549 {
3550 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3551
3552 switch (res_id) {
3553 case ECORE_SB:
3554 mfw_res_id = RESOURCE_NUM_SB_E;
3555 break;
3556 case ECORE_L2_QUEUE:
3557 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3558 break;
3559 case ECORE_VPORT:
3560 mfw_res_id = RESOURCE_NUM_VPORT_E;
3561 break;
3562 case ECORE_RSS_ENG:
3563 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3564 break;
3565 case ECORE_PQ:
3566 mfw_res_id = RESOURCE_NUM_PQ_E;
3567 break;
3568 case ECORE_RL:
3569 mfw_res_id = RESOURCE_NUM_RL_E;
3570 break;
3571 case ECORE_MAC:
3572 case ECORE_VLAN:
3573 /* Each VFC resource can accommodate both a MAC and a VLAN */
3574 mfw_res_id = RESOURCE_VFC_FILTER_E;
3575 break;
3576 case ECORE_ILT:
3577 mfw_res_id = RESOURCE_ILT_E;
3578 break;
3579 case ECORE_LL2_QUEUE:
3580 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3581 break;
3582 case ECORE_RDMA_CNQ_RAM:
3583 case ECORE_CMDQS_CQS:
3584 /* CNQ/CMDQS are the same resource */
3585 mfw_res_id = RESOURCE_CQS_E;
3586 break;
3587 case ECORE_RDMA_STATS_QUEUE:
3588 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3589 break;
3590 case ECORE_BDQ:
3591 mfw_res_id = RESOURCE_BDQ_E;
3592 break;
3593 default:
3594 break;
3595 }
3596
3597 return mfw_res_id;
3598 }
3599
3600 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3601 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3602 #define ECORE_RESC_ALLOC_VERSION \
3603 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3604 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3605 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3606 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3607
3608 struct ecore_resc_alloc_in_params {
3609 u32 cmd;
3610 enum ecore_resources res_id;
3611 u32 resc_max_val;
3612 };
3613
3614 struct ecore_resc_alloc_out_params {
3615 u32 mcp_resp;
3616 u32 mcp_param;
3617 u32 resc_num;
3618 u32 resc_start;
3619 u32 vf_resc_num;
3620 u32 vf_resc_start;
3621 u32 flags;
3622 };
3623
3624 static enum _ecore_status_t
3625 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3626 struct ecore_ptt *p_ptt,
3627 struct ecore_resc_alloc_in_params *p_in_params,
3628 struct ecore_resc_alloc_out_params *p_out_params)
3629 {
3630 struct ecore_mcp_mb_params mb_params;
3631 struct resource_info mfw_resc_info;
3632 enum _ecore_status_t rc;
3633
3634 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3635
3636 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3637 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3638 DP_ERR(p_hwfn,
3639 "Failed to match resource %d [%s] with the MFW resources\n",
3640 p_in_params->res_id,
3641 ecore_hw_get_resc_name(p_in_params->res_id));
3642 return ECORE_INVAL;
3643 }
3644
3645 switch (p_in_params->cmd) {
3646 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3647 mfw_resc_info.size = p_in_params->resc_max_val;
3648 /* Fallthrough */
3649 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3650 break;
3651 default:
3652 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3653 p_in_params->cmd);
3654 return ECORE_INVAL;
3655 }
3656
3657 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3658 mb_params.cmd = p_in_params->cmd;
3659 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3660 mb_params.p_data_src = &mfw_resc_info;
3661 mb_params.data_src_size = sizeof(mfw_resc_info);
3662 mb_params.p_data_dst = mb_params.p_data_src;
3663 mb_params.data_dst_size = mb_params.data_src_size;
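	/*
	 * The request and response share the same buffer: the MFW overwrites
	 * mfw_resc_info in place with the allocation results.
	 */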
3664
3665 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3666 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3667 p_in_params->cmd, p_in_params->res_id,
3668 ecore_hw_get_resc_name(p_in_params->res_id),
3669 ECORE_MFW_GET_FIELD(mb_params.param,
3670 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3671 ECORE_MFW_GET_FIELD(mb_params.param,
3672 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3673 p_in_params->resc_max_val);
3674
3675 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3676 if (rc != ECORE_SUCCESS)
3677 return rc;
3678
3679 p_out_params->mcp_resp = mb_params.mcp_resp;
3680 p_out_params->mcp_param = mb_params.mcp_param;
3681 p_out_params->resc_num = mfw_resc_info.size;
3682 p_out_params->resc_start = mfw_resc_info.offset;
3683 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3684 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3685 p_out_params->flags = mfw_resc_info.flags;
3686
3687 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3688 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3689 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3690 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3691 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3692 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3693 p_out_params->resc_num, p_out_params->resc_start,
3694 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3695 p_out_params->flags);
3696
3697 return ECORE_SUCCESS;
3698 }
3699
3700 enum _ecore_status_t
3701 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3702 enum ecore_resources res_id, u32 resc_max_val,
3703 u32 *p_mcp_resp)
3704 {
3705 struct ecore_resc_alloc_out_params out_params;
3706 struct ecore_resc_alloc_in_params in_params;
3707 enum _ecore_status_t rc;
3708
3709 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3710 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3711 in_params.res_id = res_id;
3712 in_params.resc_max_val = resc_max_val;
3713 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3714 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3715 &out_params);
3716 if (rc != ECORE_SUCCESS)
3717 return rc;
3718
3719 *p_mcp_resp = out_params.mcp_resp;
3720
3721 return ECORE_SUCCESS;
3722 }
3723
3724 enum _ecore_status_t
3725 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3726 enum ecore_resources res_id, u32 *p_mcp_resp,
3727 u32 *p_resc_num, u32 *p_resc_start)
3728 {
3729 struct ecore_resc_alloc_out_params out_params;
3730 struct ecore_resc_alloc_in_params in_params;
3731 enum _ecore_status_t rc;
3732
3733 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3734 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3735 in_params.res_id = res_id;
3736 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3737 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3738 &out_params);
3739 if (rc != ECORE_SUCCESS)
3740 return rc;
3741
3742 *p_mcp_resp = out_params.mcp_resp;
3743
3744 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3745 *p_resc_num = out_params.resc_num;
3746 *p_resc_start = out_params.resc_start;
3747 }
3748
3749 return ECORE_SUCCESS;
3750 }
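
/*
 * Usage sketch (illustrative only): query this function's status-block
 * allocation from the MFW. The response code must be checked, since the
 * MFW may answer with a non-OK resource-allocation code.
 *
 *	u32 mcp_resp, num_sbs, sb_start;
 *
 *	if (ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_SB, &mcp_resp,
 *				    &num_sbs, &sb_start) == ECORE_SUCCESS &&
 *	    mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "SBs: num 0x%x, start 0x%x\n", num_sbs, sb_start);
 */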
3751
3752 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3753 struct ecore_ptt *p_ptt)
3754 {
3755 u32 mcp_resp, mcp_param;
3756
3757 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3758 &mcp_resp, &mcp_param);
3759 }
3760
3761 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3762 struct ecore_ptt *p_ptt,
3763 u8 lldp_mac_addr[ETH_ALEN])
3764 {
3765 struct ecore_mcp_mb_params mb_params;
3766 struct mcp_mac lldp_mac;
3767 enum _ecore_status_t rc;
3768
3769 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3770 mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3771 mb_params.p_data_dst = &lldp_mac;
3772 mb_params.data_dst_size = sizeof(lldp_mac);
3773 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3774 if (rc != ECORE_SUCCESS)
3775 return rc;
3776
3777 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3778 DP_NOTICE(p_hwfn, false,
3779 "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3780 mb_params.mcp_resp);
3781 return ECORE_INVAL;
3782 }
3783
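	/*
	 * The MFW packs the first two bytes of the MAC in mac_upper and the
	 * remaining four in mac_lower; reassemble them into the 6-byte array.
	 */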
3784 *(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3785 *(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3786
3787 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3788 "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3789 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3790 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3791
3792 return ECORE_SUCCESS;
3793 }
3794
3795 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3796 struct ecore_ptt *p_ptt,
3797 u8 lldp_mac_addr[ETH_ALEN])
3798 {
3799 struct ecore_mcp_mb_params mb_params;
3800 struct mcp_mac lldp_mac;
3801 enum _ecore_status_t rc;
3802
3803 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3804 "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3805 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3806 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3807
3808 OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3809 lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3810 lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3811
3812 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3813 mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3814 mb_params.p_data_src = &lldp_mac;
3815 mb_params.data_src_size = sizeof(lldp_mac);
3816 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3817 if (rc != ECORE_SUCCESS)
3818 return rc;
3819
3820 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3821 DP_NOTICE(p_hwfn, false,
3822 "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3823 mb_params.mcp_resp);
3824 return ECORE_INVAL;
3825 }
3826
3827 return ECORE_SUCCESS;
3828 }
3829
3830 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3831 struct ecore_ptt *p_ptt,
3832 u32 param, u32 *p_mcp_resp,
3833 u32 *p_mcp_param)
3834 {
3835 enum _ecore_status_t rc;
3836
3837 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3838 p_mcp_resp, p_mcp_param);
3839 if (rc != ECORE_SUCCESS)
3840 return rc;
3841
3842 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3843 DP_INFO(p_hwfn,
3844 "The resource command is unsupported by the MFW\n");
3845 return ECORE_NOTIMPL;
3846 }
3847
3848 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3849 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3850
3851 DP_NOTICE(p_hwfn, false,
3852 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3853 param, opcode);
3854 return ECORE_INVAL;
3855 }
3856
3857 return rc;
3858 }
3859
3860 enum _ecore_status_t
3861 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3862 struct ecore_resc_lock_params *p_params)
3863 {
3864 u32 param = 0, mcp_resp, mcp_param;
3865 u8 opcode;
3866 enum _ecore_status_t rc;
3867
3868 switch (p_params->timeout) {
3869 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3870 opcode = RESOURCE_OPCODE_REQ;
3871 p_params->timeout = 0;
3872 break;
3873 case ECORE_MCP_RESC_LOCK_TO_NONE:
3874 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3875 p_params->timeout = 0;
3876 break;
3877 default:
3878 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3879 break;
3880 }
3881
3882 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3883 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3884 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3885
3886 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3887 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3888 param, p_params->timeout, opcode, p_params->resource);
3889
3890 /* Attempt to acquire the resource */
3891 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3892 &mcp_param);
3893 if (rc != ECORE_SUCCESS)
3894 return rc;
3895
3896 /* Analyze the response */
3897 p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3898 RESOURCE_CMD_RSP_OWNER);
3899 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3900
3901 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3902 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3903 mcp_param, opcode, p_params->owner);
3904
3905 switch (opcode) {
3906 case RESOURCE_OPCODE_GNT:
3907 p_params->b_granted = true;
3908 break;
3909 case RESOURCE_OPCODE_BUSY:
3910 p_params->b_granted = false;
3911 break;
3912 default:
3913 DP_NOTICE(p_hwfn, false,
3914 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3915 mcp_param, opcode);
3916 return ECORE_INVAL;
3917 }
3918
3919 return ECORE_SUCCESS;
3920 }
3921
3922 enum _ecore_status_t
3923 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3924 struct ecore_resc_lock_params *p_params)
3925 {
3926 u32 retry_cnt = 0;
3927 enum _ecore_status_t rc;
3928
3929 do {
3930 /* No need for an interval before the first iteration */
3931 if (retry_cnt) {
3932 if (p_params->sleep_b4_retry) {
3933 u16 retry_interval_in_ms =
3934 DIV_ROUND_UP(p_params->retry_interval,
3935 1000);
3936
3937 OSAL_MSLEEP(retry_interval_in_ms);
3938 } else {
3939 OSAL_UDELAY(p_params->retry_interval);
3940 }
3941 }
3942
3943 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3944 if (rc != ECORE_SUCCESS)
3945 return rc;
3946
3947 if (p_params->b_granted)
3948 break;
3949 } while (retry_cnt++ < p_params->retry_num);
3950
3951 return ECORE_SUCCESS;
3952 }
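
/*
 * Usage sketch (illustrative only; the resource id is hypothetical): take
 * a lock with the default aging timeout, retrying up to 10 times with a
 * 10 msec sleep between attempts (retry_interval is in microseconds).
 *
 *	struct ecore_resc_lock_params lock_params;
 *
 *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
 *	lock_params.resource = ECORE_RESC_LOCK_DBG_DUMP;
 *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
 *	lock_params.retry_num = 10;
 *	lock_params.retry_interval = 10000;
 *	lock_params.sleep_b4_retry = true;
 *
 *	if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) != ECORE_SUCCESS ||
 *	    !lock_params.b_granted)
 *		DP_NOTICE(p_hwfn, false, "Failed to acquire the lock\n");
 */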
3953
3954 enum _ecore_status_t
3955 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3956 struct ecore_resc_unlock_params *p_params)
3957 {
3958 u32 param = 0, mcp_resp, mcp_param;
3959 u8 opcode;
3960 enum _ecore_status_t rc;
3961
3962 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3963 : RESOURCE_OPCODE_RELEASE;
3964 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3965 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3966
3967 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3968 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3969 param, opcode, p_params->resource);
3970
3971 /* Attempt to release the resource */
3972 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3973 &mcp_param);
3974 if (rc != ECORE_SUCCESS)
3975 return rc;
3976
3977 /* Analyze the response */
3978 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3979
3980 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3981 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3982 mcp_param, opcode);
3983
3984 switch (opcode) {
3985 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3986 DP_INFO(p_hwfn,
3987 "Resource unlock request for an already released resource [%d]\n",
3988 p_params->resource);
3989 /* Fallthrough */
3990 case RESOURCE_OPCODE_RELEASED:
3991 p_params->b_released = true;
3992 break;
3993 case RESOURCE_OPCODE_WRONG_OWNER:
3994 p_params->b_released = false;
3995 break;
3996 default:
3997 DP_NOTICE(p_hwfn, false,
3998 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3999 mcp_param, opcode);
4000 return ECORE_INVAL;
4001 }
4002
4003 return ECORE_SUCCESS;
4004 }
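
/*
 * Usage sketch (illustrative only), releasing the lock taken in the
 * previous example; setting b_force instead would reclaim the resource
 * even if another function currently owns it.
 *
 *	struct ecore_resc_unlock_params unlock_params;
 *
 *	OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
 *	unlock_params.resource = lock_params.resource;
 *	unlock_params.b_force = false;
 *
 *	if (ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params) ==
 *	    ECORE_SUCCESS && !unlock_params.b_released)
 *		DP_NOTICE(p_hwfn, false,
 *			  "The lock was owned by another function\n");
 */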
4005
4006 enum _ecore_status_t
4007 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4008 u16 vlan)
4009 {
4010 u32 resp = 0, param = 0;
4011 enum _ecore_status_t rc;
4012
4013 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4014 (u32)vlan << DRV_MB_PARAM_FCOE_CVID_SHIFT,
4015 			   &resp, &param);
4016 if (rc != ECORE_SUCCESS)
4017 		DP_ERR(p_hwfn, "Failed to update the FCoE VLAN, rc = %d\n", rc);
4018
4019 return rc;
4020 }
4021
4022 enum _ecore_status_t
4023 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4024 struct ecore_ptt *p_ptt, u8 *wwn)
4025 {
4026 struct ecore_mcp_mb_params mb_params;
4027 struct mcp_wwn fabric_name;
4028 enum _ecore_status_t rc;
4029
4030 OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4031 fabric_name.wwn_upper = *(u32 *)wwn;
4032 fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4033
4034 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4035 mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4036 mb_params.p_data_src = &fabric_name;
4037 mb_params.data_src_size = sizeof(fabric_name);
4038 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4039 if (rc != ECORE_SUCCESS)
4040 		DP_ERR(p_hwfn, "Failed to update the FCoE WWN, rc = %d\n", rc);
4041
4042 return rc;
4043 }
4044
4045 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4046 u32 offset, u32 val)
4047 {
4048 struct ecore_mcp_mb_params mb_params = {0};
4049 enum _ecore_status_t rc = ECORE_SUCCESS;
4050 u32 dword = val;
4051
4052 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4053 mb_params.param = offset;
4054 mb_params.p_data_src = &dword;
4055 mb_params.data_src_size = sizeof(dword);
4056
4057 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4058 	if (rc != ECORE_SUCCESS) {
4059 		DP_NOTICE(p_hwfn, false,
4060 			  "Failed to send the WoL write request, rc = %d\n", rc);
4061 		return;
4062 	}
4063 
4064 	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4065 		DP_NOTICE(p_hwfn, false,
4066 			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4067 			  val, offset, mb_params.mcp_resp);
4068 	}
4069 }
4070
4071 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4072 struct ecore_ptt *p_ptt)
4073 {
4074 u32 mcp_resp;
4075 enum _ecore_status_t rc;
4076
4077 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4078 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4079 if (rc == ECORE_SUCCESS)
4080 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4081 "MFW supported features: %08x\n",
4082 p_hwfn->mcp_info->capabilities);
4083
4084 return rc;
4085 }
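
/*
 * Usage sketch (illustrative only): once the query succeeds, individual
 * features can be tested against the cached bitmask. The feature bit below
 * follows the FW_MB_PARAM_FEATURE_SUPPORT_* naming and is an assumption
 * made for the example.
 *
 *	if (ecore_mcp_get_capabilities(p_hwfn, p_ptt) == ECORE_SUCCESS &&
 *	    (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE))
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MFW supports EEE\n");
 */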
4086
4087 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4088 struct ecore_ptt *p_ptt)
4089 {
4090 u32 mcp_resp, mcp_param, features;
4091
4092 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4093 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4094
4095 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4096 features, &mcp_resp, &mcp_param);
4097 }