16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
26
27 * You may not use this file except in compliance with the License.
28
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
31
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35
36 #include "bcm_osal.h"
37 #include "ecore.h"
38 #include "ecore_status.h"
39 #include "nvm_map.h"
40 #include "nvm_cfg.h"
41 #include "ecore_mcp.h"
42 #include "mcp_public.h"
43 #include "reg_addr.h"
44 #include "ecore_hw.h"
45 #include "ecore_init_fw_funcs.h"
46 #include "ecore_sriov.h"
47 #include "ecore_vf.h"
48 #include "ecore_iov_api.h"
49 #include "ecore_gtt_reg_addr.h"
50 #include "ecore_iro.h"
51 #include "ecore_dcbx.h"
52 #include "ecore_sp_commands.h"
53
/* Interval between MCP mailbox response polls on real silicon, in usec */
#define CHIP_MCP_RESP_ITER_US 10
/* Emulation responds far more slowly; poll once per second.
 * Parenthesized so the macro expands safely inside larger expressions.
 */
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
345 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
346 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
347 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
348 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
349 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
350 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
351 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
352
353 DP_NOTICE(p_hwfn, false,
354 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
355 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
356 }
357
358 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
359 struct ecore_ptt *p_ptt,
360 u32 cmd, u32 param,
361 u32 *o_mcp_resp, u32 *o_mcp_param)
362 {
363 u32 delay = CHIP_MCP_RESP_ITER_US;
364 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
365 u32 seq, cnt = 1, actual_mb_seq;
366 enum _ecore_status_t rc = ECORE_SUCCESS;
367
368 #ifndef ASIC_ONLY
369 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
370 delay = EMUL_MCP_RESP_ITER_US;
371 /* There is a built-in delay of 100usec in each MFW response read */
372 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
373 max_retries /= 10;
374 #endif
375
376 /* Get actual driver mailbox sequence */
377 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
378 DRV_MSG_SEQ_NUMBER_MASK;
379
380 /* Use MCP history register to check if MCP reset occurred between
381 * init time and now.
382 */
383 if (p_hwfn->mcp_info->mcp_hist !=
384 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
385 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
1416 return;
1417
1418 if (p_dev->recov_in_prog) {
1419 DP_NOTICE(p_hwfn, false,
1420 "Ignoring the indication since a recovery process is already in progress\n");
1421 return;
1422 }
1423
1424 p_dev->recov_in_prog = true;
1425
1426 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1427 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1428
1429 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1430 }
1431
1432 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1433 struct ecore_ptt *p_ptt,
1434 enum MFW_DRV_MSG_TYPE type)
1435 {
1436 enum ecore_mcp_protocol_type stats_type;
1437 union ecore_mcp_protocol_stats stats;
1438 struct ecore_mcp_mb_params mb_params;
1439 u32 hsi_param;
1440 enum _ecore_status_t rc;
1441
1442 switch (type) {
1443 case MFW_DRV_MSG_GET_LAN_STATS:
1444 stats_type = ECORE_MCP_LAN_STATS;
1445 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1446 break;
1447 case MFW_DRV_MSG_GET_FCOE_STATS:
1448 stats_type = ECORE_MCP_FCOE_STATS;
1449 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1450 break;
1451 case MFW_DRV_MSG_GET_ISCSI_STATS:
1452 stats_type = ECORE_MCP_ISCSI_STATS;
1453 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1454 break;
1455 case MFW_DRV_MSG_GET_RDMA_STATS:
1456 stats_type = ECORE_MCP_RDMA_STATS;
3111 if (!p_ptt)
3112 return ECORE_BUSY;
3113
3114 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3115 params.type = ECORE_MCP_CMD;
3116 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
3117 params.nvm_common.offset = addr;
3118 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3119 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3120 ecore_ptt_release(p_hwfn, p_ptt);
3121
3122 return rc;
3123 }
3124
3125 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3126 struct ecore_ptt *p_ptt,
3127 u32 port, u32 addr, u32 offset,
3128 u32 len, u8 *p_buf)
3129 {
3130 struct ecore_mcp_nvm_params params;
3131 enum _ecore_status_t rc;
3132 u32 bytes_left, bytes_to_copy, buf_size;
3133
3134 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3135 params.nvm_common.offset =
3136 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3137 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3138 addr = offset;
3139 offset = 0;
3140 bytes_left = len;
3141 params.type = ECORE_MCP_NVM_RD;
3142 params.nvm_rd.buf_size = &buf_size;
3143 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3144 while (bytes_left > 0) {
3145 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3146 MAX_I2C_TRANSACTION_SIZE);
3147 params.nvm_rd.buf = (u32 *)(p_buf + offset);
3148 params.nvm_common.offset &=
3149 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3150 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3151 params.nvm_common.offset |=
3152 ((addr + offset) <<
3153 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3154 params.nvm_common.offset |=
3155 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3156 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3157 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3158 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3159 return ECORE_NODEV;
3160 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3161 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3162 return ECORE_UNKNOWN_ERROR;
3163
3164 offset += *params.nvm_rd.buf_size;
3165 bytes_left -= *params.nvm_rd.buf_size;
3166 }
3167
3168 return ECORE_SUCCESS;
3169 }
3170
3171 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3172 struct ecore_ptt *p_ptt,
3173 u32 port, u32 addr, u32 offset,
3174 u32 len, u8 *p_buf)
3175 {
3176 struct ecore_mcp_nvm_params params;
3177 enum _ecore_status_t rc;
3178 u32 buf_idx, buf_size;
3179
3180 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3181 params.nvm_common.offset =
3182 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3183 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3184 params.type = ECORE_MCP_NVM_WR;
3185 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3186 buf_idx = 0;
3187 while (buf_idx < len) {
3188 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3189 MAX_I2C_TRANSACTION_SIZE);
3190 params.nvm_common.offset &=
3191 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3192 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3193 params.nvm_common.offset |=
3194 ((offset + buf_idx) <<
3195 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3196 params.nvm_common.offset |=
3197 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3198 params.nvm_wr.buf_size = buf_size;
3199 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3200 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3201 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3202 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3203 return ECORE_NODEV;
3204 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3205 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3206 return ECORE_UNKNOWN_ERROR;
3207
3208 buf_idx += buf_size;
3209 }
3210
3211 return ECORE_SUCCESS;
3212 }
3213
3214 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3215 struct ecore_ptt *p_ptt,
3216 u16 gpio, u32 *gpio_val)
3217 {
3218 enum _ecore_status_t rc = ECORE_SUCCESS;
3219 u32 drv_mb_param = 0, rsp;
3220
|
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
26
27 * You may not use this file except in compliance with the License.
28
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
31
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35
36 /*
37 * Copyright 2018 Joyent, Inc.
38 */
39
40 #include "bcm_osal.h"
41 #include "ecore.h"
42 #include "ecore_status.h"
43 #include "nvm_map.h"
44 #include "nvm_cfg.h"
45 #include "ecore_mcp.h"
46 #include "mcp_public.h"
47 #include "reg_addr.h"
48 #include "ecore_hw.h"
49 #include "ecore_init_fw_funcs.h"
50 #include "ecore_sriov.h"
51 #include "ecore_vf.h"
52 #include "ecore_iov_api.h"
53 #include "ecore_gtt_reg_addr.h"
54 #include "ecore_iro.h"
55 #include "ecore_dcbx.h"
56 #include "ecore_sp_commands.h"
57
/* Interval between MCP mailbox response polls on real silicon, in usec */
#define CHIP_MCP_RESP_ITER_US 10
/* Emulation responds far more slowly; poll once per second.
 * Parenthesized so the macro expands safely inside larger expressions.
 */
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
349 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
350 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
351 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
352 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
353 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
354 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
355 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
356
357 DP_NOTICE(p_hwfn, false,
358 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
359 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
360 }
361
362 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
363 struct ecore_ptt *p_ptt,
364 u32 cmd, u32 param,
365 u32 *o_mcp_resp, u32 *o_mcp_param)
366 {
367 u32 delay = CHIP_MCP_RESP_ITER_US;
368 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
369 u32 seq, cnt = 1, actual_mb_seq __unused;
370 enum _ecore_status_t rc = ECORE_SUCCESS;
371
372 #ifndef ASIC_ONLY
373 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
374 delay = EMUL_MCP_RESP_ITER_US;
375 /* There is a built-in delay of 100usec in each MFW response read */
376 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
377 max_retries /= 10;
378 #endif
379
380 /* Get actual driver mailbox sequence */
381 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
382 DRV_MSG_SEQ_NUMBER_MASK;
383
384 /* Use MCP history register to check if MCP reset occurred between
385 * init time and now.
386 */
387 if (p_hwfn->mcp_info->mcp_hist !=
388 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
389 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
1420 return;
1421
1422 if (p_dev->recov_in_prog) {
1423 DP_NOTICE(p_hwfn, false,
1424 "Ignoring the indication since a recovery process is already in progress\n");
1425 return;
1426 }
1427
1428 p_dev->recov_in_prog = true;
1429
1430 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1431 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1432
1433 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1434 }
1435
1436 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1437 struct ecore_ptt *p_ptt,
1438 enum MFW_DRV_MSG_TYPE type)
1439 {
1440 enum ecore_mcp_protocol_type stats_type __unused;
1441 union ecore_mcp_protocol_stats stats;
1442 struct ecore_mcp_mb_params mb_params;
1443 u32 hsi_param;
1444 enum _ecore_status_t rc;
1445
1446 switch (type) {
1447 case MFW_DRV_MSG_GET_LAN_STATS:
1448 stats_type = ECORE_MCP_LAN_STATS;
1449 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1450 break;
1451 case MFW_DRV_MSG_GET_FCOE_STATS:
1452 stats_type = ECORE_MCP_FCOE_STATS;
1453 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1454 break;
1455 case MFW_DRV_MSG_GET_ISCSI_STATS:
1456 stats_type = ECORE_MCP_ISCSI_STATS;
1457 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1458 break;
1459 case MFW_DRV_MSG_GET_RDMA_STATS:
1460 stats_type = ECORE_MCP_RDMA_STATS;
3115 if (!p_ptt)
3116 return ECORE_BUSY;
3117
3118 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3119 params.type = ECORE_MCP_CMD;
3120 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
3121 params.nvm_common.offset = addr;
3122 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3123 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3124 ecore_ptt_release(p_hwfn, p_ptt);
3125
3126 return rc;
3127 }
3128
3129 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3130 struct ecore_ptt *p_ptt,
3131 u32 port, u32 addr, u32 offset,
3132 u32 len, u8 *p_buf)
3133 {
3134 struct ecore_mcp_nvm_params params;
3135 u32 bytes_left, bytes_to_copy, buf_size;
3136
3137 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3138 params.nvm_common.offset =
3139 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3140 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3141 addr = offset;
3142 offset = 0;
3143 bytes_left = len;
3144 params.type = ECORE_MCP_NVM_RD;
3145 params.nvm_rd.buf_size = &buf_size;
3146 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3147 while (bytes_left > 0) {
3148 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3149 MAX_I2C_TRANSACTION_SIZE);
3150 params.nvm_rd.buf = (u32 *)(p_buf + offset);
3151 params.nvm_common.offset &=
3152 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3153 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3154 params.nvm_common.offset |=
3155 ((addr + offset) <<
3156 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3157 params.nvm_common.offset |=
3158 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3159 (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3160 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3161 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3162 return ECORE_NODEV;
3163 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3164 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3165 return ECORE_UNKNOWN_ERROR;
3166
3167 offset += *params.nvm_rd.buf_size;
3168 bytes_left -= *params.nvm_rd.buf_size;
3169 }
3170
3171 return ECORE_SUCCESS;
3172 }
3173
3174 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3175 struct ecore_ptt *p_ptt,
3176 u32 port, u32 addr, u32 offset,
3177 u32 len, u8 *p_buf)
3178 {
3179 struct ecore_mcp_nvm_params params;
3180 u32 buf_idx, buf_size;
3181
3182 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params));
3183 params.nvm_common.offset =
3184 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3185 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3186 params.type = ECORE_MCP_NVM_WR;
3187 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3188 buf_idx = 0;
3189 while (buf_idx < len) {
3190 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3191 MAX_I2C_TRANSACTION_SIZE);
3192 params.nvm_common.offset &=
3193 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3194 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3195 params.nvm_common.offset |=
3196 ((offset + buf_idx) <<
3197 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3198 params.nvm_common.offset |=
3199 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3200 params.nvm_wr.buf_size = buf_size;
3201 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3202 (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms);
3203 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3204 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3205 return ECORE_NODEV;
3206 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3207 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3208 return ECORE_UNKNOWN_ERROR;
3209
3210 buf_idx += buf_size;
3211 }
3212
3213 return ECORE_SUCCESS;
3214 }
3215
3216 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3217 struct ecore_ptt *p_ptt,
3218 u16 gpio, u32 *gpio_val)
3219 {
3220 enum _ecore_status_t rc = ECORE_SUCCESS;
3221 u32 drv_mb_param = 0, rsp;
3222
|