9724 qede needs updates for newer GCC
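
Newer GCC releases warn, via -Wunused-but-set-variable (part of -Wall), about local variables that are assigned but never read, and the illumos build turns warnings into errors with -Werror. Rather than restructuring the vendor code, this change keeps the offending reads and annotates the affected declarations with the illumos __unused attribute. A minimal sketch of the pattern, assuming __unused expands to __attribute__((__unused__)) as in illumos' <sys/ccompile.h>; read_hw_reg() is a hypothetical stand-in for the driver's register accessors:

    #include <stdint.h>
    #include <sys/ccompile.h>       /* illumos definition of __unused */

    static uint32_t
    read_hw_reg(void)
    {
            return (0x1234);        /* stand-in for a device register read */
    }

    void
    example(void)
    {
            /*
             * The read is kept in place, but nothing consumes the
             * result, so newer GCC would warn here without the
             * annotation on the declaration.
             */
            uint32_t seq __unused;

            seq = read_hw_reg();
    }

Compiled with "gcc -c -Wall -Werror", the sketch succeeds with the annotation and fails without it.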
--- old/usr/src/uts/common/io/qede/579xx/drivers/ecore/ecore_mcp.c
+++ new/usr/src/uts/common/io/qede/579xx/drivers/ecore/ecore_mcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, v.1, (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://opensource.org/licenses/CDDL-1.0.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2014-2017 Cavium, Inc.
24 24 * The contents of this file are subject to the terms of the Common Development
25 25 * and Distribution License, v.1, (the "License").
26 26
27 27 * You may not use this file except in compliance with the License.
28 28
29 29 * You can obtain a copy of the License at available
30 30 * at http://opensource.org/licenses/CDDL-1.0
31 31
32 32 * See the License for the specific language governing permissions and
33 33 * limitations under the License.
34 34 */
35 35
36 +/*
37 + * Copyright 2018 Joyent, Inc.
38 + */
39 +
36 40 #include "bcm_osal.h"
37 41 #include "ecore.h"
38 42 #include "ecore_status.h"
39 43 #include "nvm_map.h"
40 44 #include "nvm_cfg.h"
41 45 #include "ecore_mcp.h"
42 46 #include "mcp_public.h"
43 47 #include "reg_addr.h"
44 48 #include "ecore_hw.h"
45 49 #include "ecore_init_fw_funcs.h"
46 50 #include "ecore_sriov.h"
47 51 #include "ecore_vf.h"
48 52 #include "ecore_iov_api.h"
49 53 #include "ecore_gtt_reg_addr.h"
50 54 #include "ecore_iro.h"
51 55 #include "ecore_dcbx.h"
52 56 #include "ecore_sp_commands.h"
53 57
54 58 #define CHIP_MCP_RESP_ITER_US 10
55 59 #define EMUL_MCP_RESP_ITER_US 1000 * 1000
56 60
57 61 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
58 62 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
59 63
60 64 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
61 65 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
62 66 _val)
63 67
64 68 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
65 69 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
66 70
67 71 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
68 72 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
69 73 OFFSETOF(struct public_drv_mb, _field), _val)
70 74
71 75 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
72 76 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
73 77 OFFSETOF(struct public_drv_mb, _field))
74 78
75 79 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
76 80 DRV_ID_PDA_COMP_VER_SHIFT)
77 81
78 82 #define MCP_BYTES_PER_MBIT_SHIFT 17
79 83
80 84 #ifndef ASIC_ONLY
81 85 static int loaded;
82 86 static int loaded_port[MAX_NUM_PORTS] = { 0 };
83 87 #endif
84 88
85 89 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
86 90 {
87 91 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
88 92 return false;
89 93 return true;
90 94 }
91 95
92 96 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
93 97 struct ecore_ptt *p_ptt)
94 98 {
95 99 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
96 100 PUBLIC_PORT);
97 101 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
98 102
99 103 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
100 104 MFW_PORT(p_hwfn));
101 105 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
102 106 "port_addr = 0x%x, port_id 0x%02x\n",
103 107 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
104 108 }
105 109
106 110 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
107 111 struct ecore_ptt *p_ptt)
108 112 {
109 113 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
110 114 OSAL_BE32 tmp;
111 115 u32 i;
112 116
113 117 #ifndef ASIC_ONLY
114 118 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
115 119 return;
116 120 #endif
117 121
118 122 if (!p_hwfn->mcp_info->public_base)
119 123 return;
120 124
121 125 for (i = 0; i < length; i++) {
122 126 tmp = ecore_rd(p_hwfn, p_ptt,
123 127 p_hwfn->mcp_info->mfw_mb_addr +
124 128 (i << 2) + sizeof(u32));
125 129
126 130 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
127 131 OSAL_BE32_TO_CPU(tmp);
128 132 }
129 133 }
130 134
131 135 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
132 136 {
133 137 if (p_hwfn->mcp_info) {
134 138 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
135 139 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
136 140 #ifdef CONFIG_ECORE_LOCK_ALLOC
137 141 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
138 142 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
139 143 #endif
140 144 }
141 145 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
142 146 p_hwfn->mcp_info = OSAL_NULL;
143 147
144 148 return ECORE_SUCCESS;
145 149 }
146 150
147 151 enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
148 152 struct ecore_ptt *p_ptt)
149 153 {
150 154 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
151 155 u32 drv_mb_offsize, mfw_mb_offsize;
152 156 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
153 157
154 158 #ifndef ASIC_ONLY
155 159 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
156 160 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
157 161 p_info->public_base = 0;
158 162 return ECORE_INVAL;
159 163 }
160 164 #endif
161 165
162 166 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
163 167 if (!p_info->public_base)
164 168 return ECORE_INVAL;
165 169
166 170 p_info->public_base |= GRCBASE_MCP;
167 171
168 172 /* Calculate the driver and MFW mailbox address */
169 173 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
170 174 SECTION_OFFSIZE_ADDR(p_info->public_base,
171 175 PUBLIC_DRV_MB));
172 176 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
173 177 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
174 178 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
175 179 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
176 180
177 181 /* Set the MFW MB address */
178 182 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
179 183 SECTION_OFFSIZE_ADDR(p_info->public_base,
180 184 PUBLIC_MFW_MB));
181 185 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
182 186 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
183 187 p_info->mfw_mb_addr);
184 188
185 189 /* Get the current driver mailbox sequence before sending
186 190 * the first command
187 191 */
188 192 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
189 193 DRV_MSG_SEQ_NUMBER_MASK;
190 194
191 195 /* Get current FW pulse sequence */
192 196 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
193 197 DRV_PULSE_SEQ_MASK;
194 198
195 199 p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
196 200 MISCS_REG_GENERIC_POR_0);
197 201
198 202 return ECORE_SUCCESS;
199 203 }
200 204
201 205 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
202 206 struct ecore_ptt *p_ptt)
203 207 {
204 208 struct ecore_mcp_info *p_info;
205 209 u32 size;
206 210
207 211 /* Allocate mcp_info structure */
208 212 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
209 213 sizeof(*p_hwfn->mcp_info));
210 214 if (!p_hwfn->mcp_info)
211 215 goto err;
212 216 p_info = p_hwfn->mcp_info;
213 217
214 218 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
215 219 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
216 220 /* Do not free mcp_info here, since public_base indicate that
217 221 * the MCP is not initialized
218 222 */
219 223 return ECORE_SUCCESS;
220 224 }
221 225
222 226 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
223 227 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
224 228 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
225 229 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
226 230 goto err;
227 231
228 232 /* Initialize the MFW spinlock */
229 233 #ifdef CONFIG_ECORE_LOCK_ALLOC
230 234 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
231 235 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
232 236 #endif
233 237 OSAL_SPIN_LOCK_INIT(&p_info->lock);
234 238 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
235 239
236 240 return ECORE_SUCCESS;
237 241
238 242 err:
239 243 DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
240 244 ecore_mcp_free(p_hwfn);
241 245 return ECORE_NOMEM;
242 246
243 247 }
244 248
245 249 /* Locks the MFW mailbox of a PF to ensure a single access.
246 250 * The lock is achieved in most cases by holding a spinlock, causing other
247 251 * threads to wait till a previous access is done.
248 252 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
249 253 * access is achieved by setting a blocking flag, which will fail other
250 254 * competing contexts to send their mailboxes.
251 255 */
252 256 static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
253 257 u32 cmd)
254 258 {
255 259 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
256 260
257 261 /* The spinlock shouldn't be acquired when the mailbox command is
258 262 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
259 263 * pending [UN]LOAD_REQ command of another PF together with a spinlock
260 264 * (i.e. interrupts are disabled) - can lead to a deadlock.
261 265 * It is assumed that for a single PF, no other mailbox commands can be
262 266 * sent from another context while sending LOAD_REQ, and that any
263 267 * parallel commands to UNLOAD_REQ can be cancelled.
264 268 */
265 269 if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
266 270 p_hwfn->mcp_info->block_mb_sending = false;
267 271
268 272 /* There's at least a single command that is sent by ecore during the
269 273 * load sequence [expectation of MFW].
270 274 */
271 275 if ((p_hwfn->mcp_info->block_mb_sending) &&
272 276 (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
273 277 DP_NOTICE(p_hwfn, false,
274 278 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
275 279 cmd);
276 280 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
277 281 return ECORE_BUSY;
278 282 }
279 283
280 284 if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
281 285 p_hwfn->mcp_info->block_mb_sending = true;
282 286 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
283 287 }
284 288
285 289 return ECORE_SUCCESS;
286 290 }
287 291
288 292 static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
289 293 {
290 294 if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
291 295 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
292 296 }
293 297
294 298 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
295 299 struct ecore_ptt *p_ptt)
296 300 {
297 301 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
298 302 u32 delay = CHIP_MCP_RESP_ITER_US;
299 303 u32 org_mcp_reset_seq, cnt = 0;
300 304 enum _ecore_status_t rc = ECORE_SUCCESS;
301 305
302 306 #ifndef ASIC_ONLY
303 307 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
304 308 delay = EMUL_MCP_RESP_ITER_US;
305 309 #endif
306 310
307 311 /* Ensure that only a single thread is accessing the mailbox at a
308 312 * certain time.
309 313 */
310 314 rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
311 315 if (rc != ECORE_SUCCESS)
312 316 return rc;
313 317
314 318 /* Set drv command along with the updated sequence */
315 319 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
316 320 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
317 321
318 322 do {
319 323 /* Wait for MFW response */
320 324 OSAL_UDELAY(delay);
321 325 /* Give the FW up to 500 second (50*1000*10usec) */
322 326 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
323 327 MISCS_REG_GENERIC_POR_0)) &&
324 328 (cnt++ < ECORE_MCP_RESET_RETRIES));
325 329
326 330 if (org_mcp_reset_seq !=
327 331 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
328 332 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
329 333 "MCP was reset after %d usec\n", cnt * delay);
330 334 } else {
331 335 DP_ERR(p_hwfn, "Failed to reset MCP\n");
332 336 rc = ECORE_AGAIN;
333 337 }
334 338
335 339 ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
336 340
337 341 return rc;
338 342 }
339 343
340 344 void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
341 345 struct ecore_ptt *p_ptt)
342 346 {
343 347 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
344 348
345 349 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
346 350 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
347 351 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
348 352 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
349 353 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
350 354 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
351 355 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
352 356
353 357 DP_NOTICE(p_hwfn, false,
354 358 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
355 359 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
356 360 }
357 361
358 362 static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
359 363 struct ecore_ptt *p_ptt,
360 364 u32 cmd, u32 param,
361 365 u32 *o_mcp_resp, u32 *o_mcp_param)
362 366 {
363 367 u32 delay = CHIP_MCP_RESP_ITER_US;
364 368 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
365 - u32 seq, cnt = 1, actual_mb_seq;
369 + u32 seq, cnt = 1, actual_mb_seq __unused;
366 370 enum _ecore_status_t rc = ECORE_SUCCESS;
367 371
368 372 #ifndef ASIC_ONLY
369 373 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
370 374 delay = EMUL_MCP_RESP_ITER_US;
371 375 /* There is a built-in delay of 100usec in each MFW response read */
372 376 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
373 377 max_retries /= 10;
374 378 #endif
375 379
376 380 /* Get actual driver mailbox sequence */
377 381 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
378 382 DRV_MSG_SEQ_NUMBER_MASK;
379 383
380 384 /* Use MCP history register to check if MCP reset occurred between
381 385 * init time and now.
382 386 */
383 387 if (p_hwfn->mcp_info->mcp_hist !=
384 388 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
385 389 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
386 390 ecore_load_mcp_offsets(p_hwfn, p_ptt);
387 391 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
388 392 }
389 393 seq = ++p_hwfn->mcp_info->drv_mb_seq;
390 394
391 395 /* Set drv param */
392 396 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
393 397
394 398 /* Set drv command along with the updated sequence */
395 399 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
396 400
397 401 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
398 402 "wrote command (%x) to MFW MB param 0x%08x\n",
399 403 (cmd | seq), param);
400 404
401 405 do {
402 406 /* Wait for MFW response */
403 407 OSAL_UDELAY(delay);
404 408 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
405 409
406 410 /* Give the FW up to 5 second (500*10ms) */
407 411 } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
408 412 (cnt++ < max_retries));
409 413
410 414 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
411 415 "[after %d ms] read (%x) seq is (%x) from FW MB\n",
412 416 cnt * delay, *o_mcp_resp, seq);
413 417
414 418 /* Is this a reply to our command? */
415 419 if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
416 420 *o_mcp_resp &= FW_MSG_CODE_MASK;
417 421 /* Get the MCP param */
418 422 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
419 423 } else {
420 424 /* FW BUG! */
421 425 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
422 426 cmd, param);
423 427 ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
424 428 *o_mcp_resp = 0;
425 429 rc = ECORE_AGAIN;
426 430 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
427 431 }
428 432 return rc;
429 433 }
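
In ecore_do_mcp_cmd() above, actual_mb_seq is assigned from the driver mailbox header but never read afterwards, so newer GCC flags it as set-but-unused; the __unused annotation on its declaration silences the warning while leaving the vendor code's mailbox read in place.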
430 434
431 435 static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
432 436 struct ecore_ptt *p_ptt,
433 437 struct ecore_mcp_mb_params *p_mb_params)
434 438 {
435 439 union drv_union_data union_data;
436 440 u32 union_data_addr;
437 441 enum _ecore_status_t rc;
438 442
439 443 /* MCP not initialized */
440 444 if (!ecore_mcp_is_init(p_hwfn)) {
441 445 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
442 446 return ECORE_BUSY;
443 447 }
444 448
445 449 if (p_mb_params->data_src_size > sizeof(union_data) ||
446 450 p_mb_params->data_dst_size > sizeof(union_data)) {
447 451 DP_ERR(p_hwfn,
448 452 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
449 453 p_mb_params->data_src_size, p_mb_params->data_dst_size,
450 454 sizeof(union_data));
451 455 return ECORE_INVAL;
452 456 }
453 457
454 458 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
455 459 OFFSETOF(struct public_drv_mb, union_data);
456 460
457 461 /* Ensure that only a single thread is accessing the mailbox at a
458 462 * certain time.
459 463 */
460 464 rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
461 465 if (rc != ECORE_SUCCESS)
462 466 return rc;
463 467
464 468 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
465 469 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
466 470 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
467 471 p_mb_params->data_src_size);
468 472 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
469 473 sizeof(union_data));
470 474
471 475 rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
472 476 p_mb_params->param, &p_mb_params->mcp_resp,
473 477 &p_mb_params->mcp_param);
474 478
475 479 if (p_mb_params->p_data_dst != OSAL_NULL &&
476 480 p_mb_params->data_dst_size)
477 481 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
478 482 union_data_addr, p_mb_params->data_dst_size);
479 483
480 484 ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
481 485
482 486 return rc;
483 487 }
484 488
485 489 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
486 490 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
487 491 u32 *o_mcp_resp, u32 *o_mcp_param)
488 492 {
489 493 struct ecore_mcp_mb_params mb_params;
490 494 enum _ecore_status_t rc;
491 495
492 496 #ifndef ASIC_ONLY
493 497 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
494 498 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
495 499 loaded--;
496 500 loaded_port[p_hwfn->port_id]--;
497 501 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
498 502 loaded);
499 503 }
500 504 return ECORE_SUCCESS;
501 505 }
502 506 #endif
503 507
504 508 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
505 509 mb_params.cmd = cmd;
506 510 mb_params.param = param;
507 511 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
508 512 if (rc != ECORE_SUCCESS)
509 513 return rc;
510 514
511 515 *o_mcp_resp = mb_params.mcp_resp;
512 516 *o_mcp_param = mb_params.mcp_param;
513 517
514 518 return ECORE_SUCCESS;
515 519 }
516 520
517 521 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
518 522 struct ecore_ptt *p_ptt,
519 523 u32 cmd,
520 524 u32 param,
521 525 u32 *o_mcp_resp,
522 526 u32 *o_mcp_param,
523 527 u32 i_txn_size,
524 528 u32 *i_buf)
525 529 {
526 530 struct ecore_mcp_mb_params mb_params;
527 531 enum _ecore_status_t rc;
528 532
529 533 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
530 534 mb_params.cmd = cmd;
531 535 mb_params.param = param;
532 536 mb_params.p_data_src = i_buf;
533 537 mb_params.data_src_size = (u8) i_txn_size;
534 538 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
535 539 if (rc != ECORE_SUCCESS)
536 540 return rc;
537 541
538 542 *o_mcp_resp = mb_params.mcp_resp;
539 543 *o_mcp_param = mb_params.mcp_param;
540 544
541 545 return ECORE_SUCCESS;
542 546 }
543 547
544 548 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
545 549 struct ecore_ptt *p_ptt,
546 550 u32 cmd,
547 551 u32 param,
548 552 u32 *o_mcp_resp,
549 553 u32 *o_mcp_param,
550 554 u32 *o_txn_size,
551 555 u32 *o_buf)
552 556 {
553 557 struct ecore_mcp_mb_params mb_params;
554 558 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
555 559 enum _ecore_status_t rc;
556 560
557 561 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
558 562 mb_params.cmd = cmd;
559 563 mb_params.param = param;
560 564 mb_params.p_data_dst = raw_data;
561 565
562 566 /* Use the maximal value since the actual one is part of the response */
563 567 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
564 568
565 569 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
566 570 if (rc != ECORE_SUCCESS)
567 571 return rc;
568 572
569 573 *o_mcp_resp = mb_params.mcp_resp;
570 574 *o_mcp_param = mb_params.mcp_param;
571 575
572 576 *o_txn_size = *o_mcp_param;
573 577 OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);
574 578
575 579 return ECORE_SUCCESS;
576 580 }
577 581
578 582 #ifndef ASIC_ONLY
579 583 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
580 584 u32 *p_load_code)
581 585 {
582 586 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
583 587
584 588 if (!loaded) {
585 589 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
586 590 } else if (!loaded_port[p_hwfn->port_id]) {
587 591 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
588 592 } else {
589 593 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
590 594 }
591 595
592 596 /* On CMT, always tell that it's engine */
593 597 if (p_hwfn->p_dev->num_hwfns > 1)
594 598 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
595 599
596 600 *p_load_code = load_phase;
597 601 loaded++;
598 602 loaded_port[p_hwfn->port_id]++;
599 603
600 604 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
601 605 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
602 606 *p_load_code, loaded, p_hwfn->port_id,
603 607 loaded_port[p_hwfn->port_id]);
604 608 }
605 609 #endif
606 610
607 611 static bool
608 612 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
609 613 enum ecore_override_force_load override_force_load)
610 614 {
611 615 bool can_force_load = false;
612 616
613 617 switch (override_force_load) {
614 618 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
615 619 can_force_load = true;
616 620 break;
617 621 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
618 622 can_force_load = false;
619 623 break;
620 624 default:
621 625 can_force_load = (drv_role == DRV_ROLE_OS &&
622 626 exist_drv_role == DRV_ROLE_PREBOOT) ||
623 627 (drv_role == DRV_ROLE_KDUMP &&
624 628 exist_drv_role == DRV_ROLE_OS);
625 629 break;
626 630 }
627 631
628 632 return can_force_load;
629 633 }
630 634
631 635 static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
632 636 struct ecore_ptt *p_ptt)
633 637 {
634 638 u32 resp = 0, param = 0;
635 639 enum _ecore_status_t rc;
636 640
637 641 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
  638  642   			   &resp, &param);
639 643 if (rc != ECORE_SUCCESS)
640 644 DP_NOTICE(p_hwfn, false,
641 645 "Failed to send cancel load request, rc = %d\n", rc);
642 646
643 647 return rc;
644 648 }
645 649
646 650 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
647 651 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
648 652 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
649 653 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
650 654 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
651 655 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
652 656 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
653 657
654 658 static u32 ecore_get_config_bitmap(void)
655 659 {
656 660 u32 config_bitmap = 0x0;
657 661
658 662 #ifdef CONFIG_ECORE_L2
659 663 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
660 664 #endif
661 665 #ifdef CONFIG_ECORE_SRIOV
662 666 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
663 667 #endif
664 668 #ifdef CONFIG_ECORE_ROCE
665 669 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
666 670 #endif
667 671 #ifdef CONFIG_ECORE_IWARP
668 672 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
669 673 #endif
670 674 #ifdef CONFIG_ECORE_FCOE
671 675 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
672 676 #endif
673 677 #ifdef CONFIG_ECORE_ISCSI
674 678 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
675 679 #endif
676 680 #ifdef CONFIG_ECORE_LL2
677 681 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
678 682 #endif
679 683
680 684 return config_bitmap;
681 685 }
682 686
683 687 struct ecore_load_req_in_params {
684 688 u8 hsi_ver;
685 689 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
686 690 #define ECORE_LOAD_REQ_HSI_VER_1 1
687 691 u32 drv_ver_0;
688 692 u32 drv_ver_1;
689 693 u32 fw_ver;
690 694 u8 drv_role;
691 695 u8 timeout_val;
692 696 u8 force_cmd;
693 697 bool avoid_eng_reset;
694 698 };
695 699
696 700 struct ecore_load_req_out_params {
697 701 u32 load_code;
698 702 u32 exist_drv_ver_0;
699 703 u32 exist_drv_ver_1;
700 704 u32 exist_fw_ver;
701 705 u8 exist_drv_role;
702 706 u8 mfw_hsi_ver;
703 707 bool drv_exists;
704 708 };
705 709
706 710 static enum _ecore_status_t
707 711 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
708 712 struct ecore_load_req_in_params *p_in_params,
709 713 struct ecore_load_req_out_params *p_out_params)
710 714 {
711 715 struct ecore_mcp_mb_params mb_params;
712 716 struct load_req_stc load_req;
713 717 struct load_rsp_stc load_rsp;
714 718 u32 hsi_ver;
715 719 enum _ecore_status_t rc;
716 720
717 721 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
718 722 load_req.drv_ver_0 = p_in_params->drv_ver_0;
719 723 load_req.drv_ver_1 = p_in_params->drv_ver_1;
720 724 load_req.fw_ver = p_in_params->fw_ver;
721 725 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
722 726 p_in_params->drv_role);
723 727 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
724 728 p_in_params->timeout_val);
725 729 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
726 730 p_in_params->force_cmd);
727 731 ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
728 732 p_in_params->avoid_eng_reset);
729 733
730 734 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
731 735 DRV_ID_MCP_HSI_VER_CURRENT :
732 736 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
733 737
734 738 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
735 739 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
736 740 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
737 741 mb_params.p_data_src = &load_req;
738 742 mb_params.data_src_size = sizeof(load_req);
739 743 mb_params.p_data_dst = &load_rsp;
740 744 mb_params.data_dst_size = sizeof(load_rsp);
741 745
742 746 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
743 747 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
744 748 mb_params.param,
745 749 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
746 750 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
747 751 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
748 752 ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
749 753
750 754 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
751 755 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
752 756 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
753 757 load_req.drv_ver_0, load_req.drv_ver_1,
754 758 load_req.fw_ver, load_req.misc0,
755 759 ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
756 760 ECORE_MFW_GET_FIELD(load_req.misc0,
757 761 LOAD_REQ_LOCK_TO),
758 762 ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
759 763 ECORE_MFW_GET_FIELD(load_req.misc0,
760 764 LOAD_REQ_FLAGS0));
761 765
762 766 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
763 767 if (rc != ECORE_SUCCESS) {
764 768 DP_NOTICE(p_hwfn, false,
765 769 "Failed to send load request, rc = %d\n", rc);
766 770 return rc;
767 771 }
768 772
769 773 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
770 774 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
771 775 p_out_params->load_code = mb_params.mcp_resp;
772 776
773 777 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
774 778 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
775 779 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
776 780 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
777 781 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
778 782 load_rsp.fw_ver, load_rsp.misc0,
779 783 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
780 784 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
781 785 ECORE_MFW_GET_FIELD(load_rsp.misc0,
782 786 LOAD_RSP_FLAGS0));
783 787
784 788 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
785 789 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
786 790 p_out_params->exist_fw_ver = load_rsp.fw_ver;
787 791 p_out_params->exist_drv_role =
788 792 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
789 793 p_out_params->mfw_hsi_ver =
790 794 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
791 795 p_out_params->drv_exists =
792 796 ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
793 797 LOAD_RSP_FLAGS0_DRV_EXISTS;
794 798 }
795 799
796 800 return ECORE_SUCCESS;
797 801 }
798 802
799 803 static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
800 804 enum ecore_drv_role drv_role,
801 805 u8 *p_mfw_drv_role)
802 806 {
803 807 switch (drv_role)
804 808 {
805 809 case ECORE_DRV_ROLE_OS:
806 810 *p_mfw_drv_role = DRV_ROLE_OS;
807 811 break;
808 812 case ECORE_DRV_ROLE_KDUMP:
809 813 *p_mfw_drv_role = DRV_ROLE_KDUMP;
810 814 break;
811 815 default:
812 816 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
813 817 return ECORE_INVAL;
814 818 }
815 819
816 820 return ECORE_SUCCESS;
817 821 }
818 822
819 823 enum ecore_load_req_force {
820 824 ECORE_LOAD_REQ_FORCE_NONE,
821 825 ECORE_LOAD_REQ_FORCE_PF,
822 826 ECORE_LOAD_REQ_FORCE_ALL,
823 827 };
824 828
825 829 static enum _ecore_status_t
826 830 ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
827 831 enum ecore_load_req_force force_cmd,
828 832 u8 *p_mfw_force_cmd)
829 833 {
830 834 switch (force_cmd) {
831 835 case ECORE_LOAD_REQ_FORCE_NONE:
832 836 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
833 837 break;
834 838 case ECORE_LOAD_REQ_FORCE_PF:
835 839 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
836 840 break;
837 841 case ECORE_LOAD_REQ_FORCE_ALL:
838 842 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
839 843 break;
840 844 default:
841 845 DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
842 846 return ECORE_INVAL;
843 847 }
844 848
845 849 return ECORE_SUCCESS;
846 850 }
847 851
848 852 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
849 853 struct ecore_ptt *p_ptt,
850 854 struct ecore_load_req_params *p_params)
851 855 {
852 856 struct ecore_load_req_out_params out_params;
853 857 struct ecore_load_req_in_params in_params;
854 858 u8 mfw_drv_role, mfw_force_cmd;
855 859 enum _ecore_status_t rc;
856 860
857 861 #ifndef ASIC_ONLY
858 862 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
859 863 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
860 864 return ECORE_SUCCESS;
861 865 }
862 866 #endif
863 867
864 868 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
865 869 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
866 870 in_params.drv_ver_0 = ECORE_VERSION;
867 871 in_params.drv_ver_1 = ecore_get_config_bitmap();
868 872 in_params.fw_ver = STORM_FW_VERSION;
869 873 rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
870 874 if (rc != ECORE_SUCCESS)
871 875 return rc;
872 876
873 877 in_params.drv_role = mfw_drv_role;
874 878 in_params.timeout_val = p_params->timeout_val;
875 879 rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
876 880 &mfw_force_cmd);
877 881 if (rc != ECORE_SUCCESS)
878 882 return rc;
879 883
880 884 in_params.force_cmd = mfw_force_cmd;
881 885 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
882 886
883 887 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
884 888 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
885 889 if (rc != ECORE_SUCCESS)
886 890 return rc;
887 891
888 892 /* First handle cases where another load request should/might be sent:
889 893 * - MFW expects the old interface [HSI version = 1]
890 894 * - MFW responds that a force load request is required
891 895 */
892 896 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
893 897 DP_INFO(p_hwfn,
894 898 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
895 899
896 900 /* The previous load request set the mailbox blocking */
897 901 p_hwfn->mcp_info->block_mb_sending = false;
898 902
899 903 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
900 904 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
901 905 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
902 906 &out_params);
903 907 if (rc != ECORE_SUCCESS)
904 908 return rc;
905 909 } else if (out_params.load_code ==
906 910 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
907 911 /* The previous load request set the mailbox blocking */
908 912 p_hwfn->mcp_info->block_mb_sending = false;
909 913
910 914 if (ecore_mcp_can_force_load(in_params.drv_role,
911 915 out_params.exist_drv_role,
912 916 p_params->override_force_load)) {
913 917 DP_INFO(p_hwfn,
914 918 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
915 919 in_params.drv_role, in_params.fw_ver,
916 920 in_params.drv_ver_1, in_params.drv_ver_0,
917 921 out_params.exist_drv_role,
918 922 out_params.exist_fw_ver,
919 923 out_params.exist_drv_ver_1,
920 924 out_params.exist_drv_ver_0);
921 925 DP_INFO(p_hwfn, "Sending a force load request\n");
922 926
923 927 rc = ecore_get_mfw_force_cmd(p_hwfn,
924 928 ECORE_LOAD_REQ_FORCE_ALL,
925 929 &mfw_force_cmd);
926 930 if (rc != ECORE_SUCCESS)
927 931 return rc;
928 932
929 933 in_params.force_cmd = mfw_force_cmd;
930 934 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
931 935 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
932 936 &out_params);
933 937 if (rc != ECORE_SUCCESS)
934 938 return rc;
935 939 } else {
936 940 DP_NOTICE(p_hwfn, false,
937 941 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
938 942 in_params.drv_role, in_params.fw_ver,
939 943 in_params.drv_ver_0, in_params.drv_ver_1,
940 944 out_params.exist_drv_role,
941 945 out_params.exist_fw_ver,
942 946 out_params.exist_drv_ver_0,
943 947 out_params.exist_drv_ver_1);
944 948 DP_NOTICE(p_hwfn, false,
945 949 "Avoid sending a force load request to prevent disruption of active PFs\n");
946 950
947 951 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
948 952 return ECORE_BUSY;
949 953 }
950 954 }
951 955
952 956 /* Now handle the other types of responses.
953 957 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
954 958 * expected here after the additional revised load requests were sent.
955 959 */
956 960 switch (out_params.load_code) {
957 961 case FW_MSG_CODE_DRV_LOAD_ENGINE:
958 962 case FW_MSG_CODE_DRV_LOAD_PORT:
959 963 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
960 964 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
961 965 out_params.drv_exists) {
962 966 /* The role and fw/driver version match, but the PF is
963 967 * already loaded and has not been unloaded gracefully.
964 968 * This is unexpected since a quasi-FLR request was
965 969 * previously sent as part of ecore_hw_prepare().
966 970 */
967 971 DP_NOTICE(p_hwfn, false,
968 972 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
969 973 return ECORE_INVAL;
970 974 }
971 975 break;
972 976 case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
973 977 case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
974 978 case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
975 979 case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
976 980 DP_NOTICE(p_hwfn, false,
977 981 "MFW refused a load request [resp 0x%08x]. Aborting.\n",
978 982 out_params.load_code);
979 983 return ECORE_BUSY;
980 984 default:
981 985 DP_NOTICE(p_hwfn, false,
982 986 "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
983 987 out_params.load_code);
984 988 break;
985 989 }
986 990
987 991 p_params->load_code = out_params.load_code;
988 992
989 993 return ECORE_SUCCESS;
990 994 }
991 995
992 996 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
993 997 struct ecore_ptt *p_ptt)
994 998 {
995 999 u32 wol_param, mcp_resp, mcp_param;
996 1000
997 1001 switch (p_hwfn->p_dev->wol_config) {
998 1002 case ECORE_OV_WOL_DISABLED:
999 1003 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1000 1004 break;
1001 1005 case ECORE_OV_WOL_ENABLED:
1002 1006 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1003 1007 break;
1004 1008 default:
1005 1009 DP_NOTICE(p_hwfn, true,
1006 1010 "Unknown WoL configuration %02x\n",
1007 1011 p_hwfn->p_dev->wol_config);
1008 1012 /* Fallthrough */
1009 1013 case ECORE_OV_WOL_DEFAULT:
1010 1014 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1011 1015 }
1012 1016
1013 1017 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1014 1018 &mcp_resp, &mcp_param);
1015 1019 }
1016 1020
1017 1021 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1018 1022 struct ecore_ptt *p_ptt)
1019 1023 {
1020 1024 struct ecore_mcp_mb_params mb_params;
1021 1025 struct mcp_mac wol_mac;
1022 1026
1023 1027 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1024 1028 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1025 1029
1026 1030 /* Set the primary MAC if WoL is enabled */
1027 1031 if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
1028 1032 u8 *p_mac = p_hwfn->p_dev->wol_mac;
1029 1033
1030 1034 OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
1031 1035 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1032 1036 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1033 1037 p_mac[4] << 8 | p_mac[5];
1034 1038
1035 1039 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
1036 1040 "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
1037 1041 p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
1038 1042 p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);
1039 1043
1040 1044 mb_params.p_data_src = &wol_mac;
1041 1045 mb_params.data_src_size = sizeof(wol_mac);
1042 1046 }
1043 1047
1044 1048 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1045 1049 }
1046 1050
1047 1051 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1048 1052 struct ecore_ptt *p_ptt)
1049 1053 {
1050 1054 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1051 1055 PUBLIC_PATH);
1052 1056 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1053 1057 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1054 1058 ECORE_PATH_ID(p_hwfn));
1055 1059 u32 disabled_vfs[VF_MAX_STATIC / 32];
1056 1060 int i;
1057 1061
1058 1062 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1059 1063 "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1060 1064 mfw_path_offsize, path_addr);
1061 1065
1062 1066 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1063 1067 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1064 1068 path_addr +
1065 1069 OFFSETOF(struct public_path,
1066 1070 mcp_vf_disabled) +
1067 1071 sizeof(u32) * i);
1068 1072 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1069 1073 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1070 1074 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1071 1075 }
1072 1076
1073 1077 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1074 1078 OSAL_VF_FLR_UPDATE(p_hwfn);
1075 1079 }
1076 1080
1077 1081 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1078 1082 struct ecore_ptt *p_ptt,
1079 1083 u32 *vfs_to_ack)
1080 1084 {
1081 1085 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1082 1086 PUBLIC_FUNC);
1083 1087 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1084 1088 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1085 1089 MCP_PF_ID(p_hwfn));
1086 1090 struct ecore_mcp_mb_params mb_params;
1087 1091 enum _ecore_status_t rc;
1088 1092 int i;
1089 1093
1090 1094 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1091 1095 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1092 1096 "Acking VFs [%08x,...,%08x] - %08x\n",
1093 1097 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1094 1098
1095 1099 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1096 1100 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1097 1101 mb_params.p_data_src = vfs_to_ack;
1098 1102 mb_params.data_src_size = VF_MAX_STATIC / 8;
1099 1103 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1100 1104 if (rc != ECORE_SUCCESS) {
1101 1105 DP_NOTICE(p_hwfn, false,
1102 1106 "Failed to pass ACK for VF flr to MFW\n");
1103 1107 return ECORE_TIMEOUT;
1104 1108 }
1105 1109
1106 1110 /* TMP - clear the ACK bits; should be done by MFW */
1107 1111 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1108 1112 ecore_wr(p_hwfn, p_ptt,
1109 1113 func_addr +
1110 1114 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1111 1115 i * sizeof(u32), 0);
1112 1116
1113 1117 return rc;
1114 1118 }
1115 1119
1116 1120 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1117 1121 struct ecore_ptt *p_ptt)
1118 1122 {
1119 1123 u32 transceiver_state;
1120 1124
1121 1125 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1122 1126 p_hwfn->mcp_info->port_addr +
1123 1127 OFFSETOF(struct public_port,
1124 1128 transceiver_data));
1125 1129
1126 1130 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1127 1131 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1128 1132 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1129 1133 OFFSETOF(struct public_port,
1130 1134 transceiver_data)));
1131 1135
1132 1136 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
1133 1137
1134 1138 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1135 1139 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1136 1140 else
1137 1141 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1138 1142 }
1139 1143
1140 1144 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1141 1145 struct ecore_ptt *p_ptt,
1142 1146 struct ecore_mcp_link_state *p_link)
1143 1147 {
1144 1148 u32 eee_status, val;
1145 1149
1146 1150 p_link->eee_adv_caps = 0;
1147 1151 p_link->eee_lp_adv_caps = 0;
1148 1152 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1149 1153 OFFSETOF(struct public_port, eee_status));
1150 1154 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1151 1155 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
1152 1156 if (val & EEE_1G_ADV)
1153 1157 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1154 1158 if (val & EEE_10G_ADV)
1155 1159 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1156 1160 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
1157 1161 if (val & EEE_1G_ADV)
1158 1162 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1159 1163 if (val & EEE_10G_ADV)
1160 1164 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1161 1165 }
1162 1166
1163 1167 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1164 1168 struct ecore_ptt *p_ptt,
1165 1169 bool b_reset)
1166 1170 {
1167 1171 struct ecore_mcp_link_state *p_link;
1168 1172 u8 max_bw, min_bw;
1169 1173 u32 status = 0;
1170 1174
1171 1175 /* Prevent SW/attentions from doing this at the same time */
1172 1176 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1173 1177
1174 1178 p_link = &p_hwfn->mcp_info->link_output;
1175 1179 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1176 1180 if (!b_reset) {
1177 1181 status = ecore_rd(p_hwfn, p_ptt,
1178 1182 p_hwfn->mcp_info->port_addr +
1179 1183 OFFSETOF(struct public_port, link_status));
1180 1184 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1181 1185 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1182 1186 status, (u32)(p_hwfn->mcp_info->port_addr +
1183 1187 OFFSETOF(struct public_port, link_status)));
1184 1188 } else {
1185 1189 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1186 1190 "Resetting link indications\n");
1187 1191 goto out;
1188 1192 }
1189 1193
1190 1194 if (p_hwfn->b_drv_link_init)
1191 1195 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1192 1196 else
1193 1197 p_link->link_up = false;
1194 1198
1195 1199 p_link->full_duplex = true;
1196 1200 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1197 1201 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1198 1202 p_link->speed = 100000;
1199 1203 break;
1200 1204 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1201 1205 p_link->speed = 50000;
1202 1206 break;
1203 1207 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1204 1208 p_link->speed = 40000;
1205 1209 break;
1206 1210 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1207 1211 p_link->speed = 25000;
1208 1212 break;
1209 1213 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1210 1214 p_link->speed = 20000;
1211 1215 break;
1212 1216 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1213 1217 p_link->speed = 10000;
1214 1218 break;
1215 1219 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1216 1220 p_link->full_duplex = false;
1217 1221 /* Fall-through */
1218 1222 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1219 1223 p_link->speed = 1000;
1220 1224 break;
1221 1225 default:
1222 1226 p_link->speed = 0;
1223 1227 }
1224 1228
1225 1229 /* We never store total line speed as p_link->speed is
1226 1230 * again changes according to bandwidth allocation.
1227 1231 */
1228 1232 if (p_link->link_up && p_link->speed)
1229 1233 p_link->line_speed = p_link->speed;
1230 1234 else
1231 1235 p_link->line_speed = 0;
1232 1236
1233 1237 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1234 1238 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1235 1239
1236 1240 /* Max bandwidth configuration */
1237 1241 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1238 1242
1239 1243 /* Mintz bandwidth configuration */
1240 1244 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1241 1245 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1242 1246 p_link->min_pf_rate);
1243 1247
1244 1248 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1245 1249 p_link->an_complete = !!(status &
1246 1250 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1247 1251 p_link->parallel_detection = !!(status &
1248 1252 LINK_STATUS_PARALLEL_DETECTION_USED);
1249 1253 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1250 1254
1251 1255 p_link->partner_adv_speed |=
1252 1256 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1253 1257 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1254 1258 p_link->partner_adv_speed |=
1255 1259 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1256 1260 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1257 1261 p_link->partner_adv_speed |=
1258 1262 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1259 1263 ECORE_LINK_PARTNER_SPEED_10G : 0;
1260 1264 p_link->partner_adv_speed |=
1261 1265 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1262 1266 ECORE_LINK_PARTNER_SPEED_20G : 0;
1263 1267 p_link->partner_adv_speed |=
1264 1268 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1265 1269 ECORE_LINK_PARTNER_SPEED_25G : 0;
1266 1270 p_link->partner_adv_speed |=
1267 1271 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1268 1272 ECORE_LINK_PARTNER_SPEED_40G : 0;
1269 1273 p_link->partner_adv_speed |=
1270 1274 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1271 1275 ECORE_LINK_PARTNER_SPEED_50G : 0;
1272 1276 p_link->partner_adv_speed |=
1273 1277 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1274 1278 ECORE_LINK_PARTNER_SPEED_100G : 0;
1275 1279
1276 1280 p_link->partner_tx_flow_ctrl_en =
1277 1281 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1278 1282 p_link->partner_rx_flow_ctrl_en =
1279 1283 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1280 1284
1281 1285 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1282 1286 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1283 1287 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1284 1288 break;
1285 1289 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1286 1290 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1287 1291 break;
1288 1292 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1289 1293 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1290 1294 break;
1291 1295 default:
1292 1296 p_link->partner_adv_pause = 0;
1293 1297 }
1294 1298
1295 1299 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1296 1300
1297 1301 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1298 1302 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1299 1303
1300 1304 OSAL_LINK_UPDATE(p_hwfn);
1301 1305 out:
1302 1306 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1303 1307 }
1304 1308
1305 1309 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1306 1310 struct ecore_ptt *p_ptt,
1307 1311 bool b_up)
1308 1312 {
1309 1313 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1310 1314 struct ecore_mcp_mb_params mb_params;
1311 1315 struct eth_phy_cfg phy_cfg;
1312 1316 enum _ecore_status_t rc = ECORE_SUCCESS;
1313 1317 u32 cmd;
1314 1318
1315 1319 #ifndef ASIC_ONLY
1316 1320 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1317 1321 return ECORE_SUCCESS;
1318 1322 #endif
1319 1323
1320 1324 /* Set the shmem configuration according to params */
1321 1325 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1322 1326 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1323 1327 if (!params->speed.autoneg)
1324 1328 phy_cfg.speed = params->speed.forced_speed;
1325 1329 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1326 1330 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1327 1331 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1328 1332 phy_cfg.adv_speed = params->speed.advertised_speeds;
1329 1333 phy_cfg.loopback_mode = params->loopback_mode;
1330 1334 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
1331 1335 if (params->eee.enable)
1332 1336 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1333 1337 if (params->eee.tx_lpi_enable)
1334 1338 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1335 1339 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1336 1340 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1337 1341 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1338 1342 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1339 1343 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1340 1344 EEE_TX_TIMER_USEC_SHIFT) &
1341 1345 EEE_TX_TIMER_USEC_MASK;
1342 1346 }
1343 1347
1344 1348 p_hwfn->b_drv_link_init = b_up;
1345 1349
1346 1350 if (b_up)
1347 1351 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1348 1352 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1349 1353 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1350 1354 phy_cfg.loopback_mode);
1351 1355 else
1352 1356 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1353 1357
1354 1358 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1355 1359 mb_params.cmd = cmd;
1356 1360 mb_params.p_data_src = &phy_cfg;
1357 1361 mb_params.data_src_size = sizeof(phy_cfg);
1358 1362 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1359 1363
1360 1364 /* if mcp fails to respond we must abort */
1361 1365 if (rc != ECORE_SUCCESS) {
1362 1366 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1363 1367 return rc;
1364 1368 }
1365 1369
1366 1370 /* Mimic link-change attention, done for several reasons:
1367 1371 * - On reset, there's no guarantee MFW would trigger
1368 1372 * an attention.
1369 1373 * - On initialization, older MFWs might not indicate link change
1370 1374 * during LFA, so we'll never get an UP indication.
1371 1375 */
1372 1376 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1373 1377
1374 1378 return rc;
1375 1379 }
1376 1380
1377 1381 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1378 1382 struct ecore_ptt *p_ptt)
1379 1383 {
1380 1384 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1381 1385
1382 1386 /* TODO - Add support for VFs */
1383 1387 if (IS_VF(p_hwfn->p_dev))
1384 1388 return ECORE_INVAL;
1385 1389
1386 1390 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1387 1391 PUBLIC_PATH);
1388 1392 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1389 1393 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1390 1394
1391 1395 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1392 1396 path_addr +
1393 1397 OFFSETOF(struct public_path, process_kill)) &
1394 1398 PROCESS_KILL_COUNTER_MASK;
1395 1399
1396 1400 return proc_kill_cnt;
1397 1401 }
1398 1402
1399 1403 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1400 1404 struct ecore_ptt *p_ptt)
1401 1405 {
1402 1406 struct ecore_dev *p_dev = p_hwfn->p_dev;
1403 1407 u32 proc_kill_cnt;
1404 1408
1405 1409 /* Prevent possible attentions/interrupts during the recovery handling
1406 1410 * and till its load phase, during which they will be re-enabled.
1407 1411 */
1408 1412 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1409 1413
1410 1414 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1411 1415
1412 1416 /* The following operations should be done once, and thus in CMT mode
1413 1417 * are carried out by only the first HW function.
1414 1418 */
1415 1419 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1416 1420 return;
1417 1421
1418 1422 if (p_dev->recov_in_prog) {
1419 1423 DP_NOTICE(p_hwfn, false,
1420 1424 "Ignoring the indication since a recovery process is already in progress\n");
1421 1425 return;
1422 1426 }
1423 1427
1424 1428 p_dev->recov_in_prog = true;
1425 1429
1426 1430 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1427 1431 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1428 1432
1429 1433 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1430 1434 }
1431 1435
1432 1436 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1433 1437 struct ecore_ptt *p_ptt,
1434 1438 enum MFW_DRV_MSG_TYPE type)
1435 1439 {
1436 - enum ecore_mcp_protocol_type stats_type;
1440 + enum ecore_mcp_protocol_type stats_type __unused;
1437 1441 union ecore_mcp_protocol_stats stats;
1438 1442 struct ecore_mcp_mb_params mb_params;
1439 1443 u32 hsi_param;
1440 1444 enum _ecore_status_t rc;
1441 1445
1442 1446 switch (type) {
1443 1447 case MFW_DRV_MSG_GET_LAN_STATS:
1444 1448 stats_type = ECORE_MCP_LAN_STATS;
1445 1449 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1446 1450 break;
1447 1451 case MFW_DRV_MSG_GET_FCOE_STATS:
1448 1452 stats_type = ECORE_MCP_FCOE_STATS;
1449 1453 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1450 1454 break;
1451 1455 case MFW_DRV_MSG_GET_ISCSI_STATS:
1452 1456 stats_type = ECORE_MCP_ISCSI_STATS;
1453 1457 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1454 1458 break;
1455 1459 case MFW_DRV_MSG_GET_RDMA_STATS:
1456 1460 stats_type = ECORE_MCP_RDMA_STATS;
1457 1461 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1458 1462 break;
1459 1463 default:
1460 1464 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
1461 1465 return;
1462 1466 }
1463 1467
1464 1468 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1465 1469
1466 1470 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1467 1471 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1468 1472 mb_params.param = hsi_param;
1469 1473 mb_params.p_data_src = &stats;
1470 1474 mb_params.data_src_size = sizeof(stats);
1471 1475 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1472 1476 if (rc != ECORE_SUCCESS)
1473 1477 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1474 1478 }
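
In ecore_mcp_send_protocol_stats() above, stats_type is consumed only by OSAL_GET_PROTOCOL_STATS(); in an OSAL port where that macro discards its type argument, the variable ends up set but never read, which is presumably what the __unused annotation addresses here.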
1475 1479
1476 1480 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1477 1481 struct public_func *p_shmem_info)
1478 1482 {
1479 1483 struct ecore_mcp_function_info *p_info;
1480 1484
1481 1485 p_info = &p_hwfn->mcp_info->func_info;
1482 1486
1483 1487 /* TODO - bandwidth min/max should have valid values of 1-100,
1484 1488 * as well as some indication that the feature is disabled.
1485 1489 * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1486 1490 * limit and correct value to min `1' and max `100' if limit isn't in
1487 1491 * range.
1488 1492 */
1489 1493 p_info->bandwidth_min = (p_shmem_info->config &
1490 1494 FUNC_MF_CFG_MIN_BW_MASK) >>
1491 1495 FUNC_MF_CFG_MIN_BW_SHIFT;
1492 1496 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1493 1497 DP_INFO(p_hwfn,
1494 1498 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1495 1499 p_info->bandwidth_min);
1496 1500 p_info->bandwidth_min = 1;
1497 1501 }
1498 1502
1499 1503 p_info->bandwidth_max = (p_shmem_info->config &
1500 1504 FUNC_MF_CFG_MAX_BW_MASK) >>
1501 1505 FUNC_MF_CFG_MAX_BW_SHIFT;
1502 1506 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1503 1507 DP_INFO(p_hwfn,
1504 1508 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1505 1509 p_info->bandwidth_max);
1506 1510 p_info->bandwidth_max = 100;
1507 1511 }
1508 1512 }
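A worked example of the clamp above, with assumed field placement (the real FUNC_MF_CFG_*_BW masks and shifts come from the MFW headers): say the minimum occupies bits 15:8 and the maximum bits 23:16.

    /* config = 0x00640a00: min = (config >> 8)  & 0xff = 10   (kept)
     *                      max = (config >> 16) & 0xff = 100  (kept)
     * config = 0x00000000: min = 0 -> clamped to 1
     *                      max = 0 -> clamped to 100                 */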
1509 1513
1510 1514 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1511 1515 struct ecore_ptt *p_ptt,
1512 1516 struct public_func *p_data,
1513 1517 int pfid)
1514 1518 {
1515 1519 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1516 1520 PUBLIC_FUNC);
1517 1521 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1518 1522 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1519 1523 u32 i, size;
1520 1524
1521 1525 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1522 1526
1523 1527 size = OSAL_MIN_T(u32, sizeof(*p_data),
1524 1528 SECTION_SIZE(mfw_path_offsize));
1525 1529 for (i = 0; i < size / sizeof(u32); i++)
1526 1530 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1527 1531 func_addr + (i << 2));
1528 1532
1529 1533 return size;
1530 1534 }
1531 1535 #if 0
1532 1536 /* This was introduced with FW 8.10.5.0; Hopefully this is only temp. */
1533 1537 enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
1534 1538 struct ecore_ptt *p_ptt,
1535 1539 u8 *p_pf)
1536 1540 {
1537 1541 struct public_func shmem_info;
1538 1542 int i;
1539 1543
1540 1544 /* Find first Ethernet interface in port */
1541 1545 for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
1542 1546 i += p_hwfn->p_dev->num_ports_in_engine) {
1543 1547 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1544 1548 MCP_PF_ID_BY_REL(p_hwfn, i));
1545 1549
1546 1550 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1547 1551 continue;
1548 1552
1549 1553 if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
1550 1554 FUNC_MF_CFG_PROTOCOL_ETHERNET) {
1551 1555 *p_pf = (u8)i;
1552 1556 return ECORE_SUCCESS;
1553 1557 }
1554 1558 }
1555 1559
1556 1560 /* This might actually be valid somewhere in the future but for now
1557 1561 * it's highly unlikely.
1558 1562 */
1559 1563 DP_NOTICE(p_hwfn, false,
1560 1564 		  "Failed to find an ethernet interface on the port in MF_SI mode\n");
1561 1565
1562 1566 return ECORE_INVAL;
1563 1567 }
1564 1568 #endif
1565 1569 static void
1566 1570 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1567 1571 {
1568 1572 struct ecore_mcp_function_info *p_info;
1569 1573 struct public_func shmem_info;
1570 1574 u32 resp = 0, param = 0;
1571 1575
1572 1576 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1573 1577 MCP_PF_ID(p_hwfn));
1574 1578
1575 1579 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1576 1580
1577 1581 p_info = &p_hwfn->mcp_info->func_info;
1578 1582
1579 1583 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1580 1584
1581 1585 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1582 1586
1583 1587 /* Acknowledge the MFW */
1584 1588 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1585 1589 		      &param);
1586 1590 }
1587 1591
1588 1592 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1589 1593 struct ecore_ptt *p_ptt)
1590 1594 {
1591 1595 struct public_func shmem_info;
1592 1596 u32 resp = 0, param = 0;
1593 1597
1594 1598 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1595 1599 MCP_PF_ID(p_hwfn));
1596 1600
1597 1601 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1598 1602 FUNC_MF_CFG_OV_STAG_MASK;
1599 1603 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1600 1604 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1601 1605 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1602 1606 ecore_wr(p_hwfn, p_ptt,
1603 1607 NIG_REG_LLH_FUNC_TAG_VALUE,
1604 1608 p_hwfn->hw_info.ovlan);
1605 1609 ecore_sp_pf_update_stag(p_hwfn);
1606 1610 }
1607 1611
1608 1612 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1609 1613
1610 1614 /* Acknowledge the MFW */
1611 1615 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1612 1616 		      &resp, &param);
1613 1617 }
1614 1618
1615 1619 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1616 1620 struct ecore_ptt *p_ptt)
1617 1621 {
1618 1622 	/* A single notification should be sent to the upper driver in CMT mode */
1619 1623 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1620 1624 return;
1621 1625
1622 1626 DP_NOTICE(p_hwfn, false,
1623 1627 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1624 1628
1625 1629 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1626 1630 }
1627 1631
1628 1632 struct ecore_mdump_cmd_params {
1629 1633 u32 cmd;
1630 1634 void *p_data_src;
1631 1635 u8 data_src_size;
1632 1636 void *p_data_dst;
1633 1637 u8 data_dst_size;
1634 1638 u32 mcp_resp;
1635 1639 };
1636 1640
1637 1641 static enum _ecore_status_t
1638 1642 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1639 1643 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1640 1644 {
1641 1645 struct ecore_mcp_mb_params mb_params;
1642 1646 enum _ecore_status_t rc;
1643 1647
1644 1648 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1645 1649 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1646 1650 mb_params.param = p_mdump_cmd_params->cmd;
1647 1651 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1648 1652 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1649 1653 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1650 1654 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1651 1655 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1652 1656 if (rc != ECORE_SUCCESS)
1653 1657 return rc;
1654 1658
1655 1659 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1656 1660
1657 1661 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1658 1662 DP_INFO(p_hwfn,
1659 1663 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1660 1664 p_mdump_cmd_params->cmd);
1661 1665 rc = ECORE_NOTIMPL;
1662 1666 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1663 1667 DP_INFO(p_hwfn,
1664 1668 "The mdump command is not supported by the MFW\n");
1665 1669 rc = ECORE_NOTIMPL;
1666 1670 }
1667 1671
1668 1672 return rc;
1669 1673 }
1670 1674
1671 1675 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1672 1676 struct ecore_ptt *p_ptt)
1673 1677 {
1674 1678 struct ecore_mdump_cmd_params mdump_cmd_params;
1675 1679
1676 1680 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1677 1681 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1678 1682
1679 1683 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1680 1684 }
1681 1685
1682 1686 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1683 1687 struct ecore_ptt *p_ptt,
1684 1688 u32 epoch)
1685 1689 {
1686 1690 struct ecore_mdump_cmd_params mdump_cmd_params;
1687 1691
1688 1692 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1689 1693 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1690 1694 mdump_cmd_params.p_data_src = &epoch;
1691 1695 mdump_cmd_params.data_src_size = sizeof(epoch);
1692 1696
1693 1697 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1694 1698 }
1695 1699
1696 1700 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1697 1701 struct ecore_ptt *p_ptt)
1698 1702 {
1699 1703 struct ecore_mdump_cmd_params mdump_cmd_params;
1700 1704
1701 1705 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1702 1706 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1703 1707
1704 1708 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1705 1709 }
1706 1710
1707 1711 static enum _ecore_status_t
1708 1712 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1709 1713 struct mdump_config_stc *p_mdump_config)
1710 1714 {
1711 1715 struct ecore_mdump_cmd_params mdump_cmd_params;
1712 1716 enum _ecore_status_t rc;
1713 1717
1714 1718 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1715 1719 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1716 1720 mdump_cmd_params.p_data_dst = p_mdump_config;
1717 1721 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1718 1722
1719 1723 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1720 1724 if (rc != ECORE_SUCCESS)
1721 1725 return rc;
1722 1726
1723 1727 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1724 1728 DP_INFO(p_hwfn,
1725 1729 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1726 1730 mdump_cmd_params.mcp_resp);
1727 1731 rc = ECORE_UNKNOWN_ERROR;
1728 1732 }
1729 1733
1730 1734 return rc;
1731 1735 }
1732 1736
1733 1737 enum _ecore_status_t
1734 1738 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1735 1739 struct ecore_mdump_info *p_mdump_info)
1736 1740 {
1737 1741 u32 addr, global_offsize, global_addr;
1738 1742 struct mdump_config_stc mdump_config;
1739 1743 enum _ecore_status_t rc;
1740 1744
1741 1745 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1742 1746
1743 1747 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1744 1748 PUBLIC_GLOBAL);
1745 1749 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1746 1750 global_addr = SECTION_ADDR(global_offsize, 0);
1747 1751 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1748 1752 global_addr +
1749 1753 OFFSETOF(struct public_global,
1750 1754 mdump_reason));
1751 1755
1752 1756 if (p_mdump_info->reason) {
1753 1757 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1754 1758 if (rc != ECORE_SUCCESS)
1755 1759 return rc;
1756 1760
1757 1761 p_mdump_info->version = mdump_config.version;
1758 1762 p_mdump_info->config = mdump_config.config;
1759 1763 p_mdump_info->epoch = mdump_config.epoc;
1760 1764 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1761 1765 p_mdump_info->valid_logs = mdump_config.valid_logs;
1762 1766
1763 1767 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1764 1768 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1765 1769 p_mdump_info->reason, p_mdump_info->version,
1766 1770 p_mdump_info->config, p_mdump_info->epoch,
1767 1771 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1768 1772 } else {
1769 1773 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1770 1774 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1771 1775 }
1772 1776
1773 1777 return ECORE_SUCCESS;
1774 1778 }
1775 1779
1776 1780 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1777 1781 struct ecore_ptt *p_ptt)
1778 1782 {
1779 1783 struct ecore_mdump_cmd_params mdump_cmd_params;
1780 1784
1781 1785 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1782 1786 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1783 1787
1784 1788 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1785 1789 }
1786 1790
1787 1791 enum _ecore_status_t
1788 1792 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1789 1793 struct ecore_mdump_retain_data *p_mdump_retain)
1790 1794 {
1791 1795 struct ecore_mdump_cmd_params mdump_cmd_params;
1792 1796 struct mdump_retain_data_stc mfw_mdump_retain;
1793 1797 enum _ecore_status_t rc;
1794 1798
1795 1799 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1796 1800 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1797 1801 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1798 1802 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1799 1803
1800 1804 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1801 1805 if (rc != ECORE_SUCCESS)
1802 1806 return rc;
1803 1807
1804 1808 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1805 1809 DP_INFO(p_hwfn,
1806 1810 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1807 1811 mdump_cmd_params.mcp_resp);
1808 1812 return ECORE_UNKNOWN_ERROR;
1809 1813 }
1810 1814
1811 1815 p_mdump_retain->valid = mfw_mdump_retain.valid;
1812 1816 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1813 1817 p_mdump_retain->pf = mfw_mdump_retain.pf;
1814 1818 p_mdump_retain->status = mfw_mdump_retain.status;
1815 1819
1816 1820 return ECORE_SUCCESS;
1817 1821 }
1818 1822
1819 1823 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1820 1824 struct ecore_ptt *p_ptt)
1821 1825 {
1822 1826 struct ecore_mdump_cmd_params mdump_cmd_params;
1823 1827
1824 1828 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1825 1829 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1826 1830
1827 1831 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1828 1832 }
1829 1833
1830 1834 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1831 1835 struct ecore_ptt *p_ptt)
1832 1836 {
1833 1837 struct ecore_mdump_retain_data mdump_retain;
1834 1838 enum _ecore_status_t rc;
1835 1839
1836 1840 /* In CMT mode - no need for more than a single acknowledgement to the
1837 1841 * MFW, and no more than a single notification to the upper driver.
1838 1842 */
1839 1843 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1840 1844 return;
1841 1845
1842 1846 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1843 1847 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1844 1848 DP_NOTICE(p_hwfn, false,
1845 1849 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1846 1850 mdump_retain.epoch, mdump_retain.pf,
1847 1851 mdump_retain.status);
1848 1852 } else {
1849 1853 DP_NOTICE(p_hwfn, false,
1850 1854 "The MFW notified that a critical error occurred in the device\n");
1851 1855 }
1852 1856
1853 1857 if (p_hwfn->p_dev->allow_mdump) {
1854 1858 DP_NOTICE(p_hwfn, false,
1855 1859 "Not acknowledging the notification to allow the MFW crash dump\n");
1856 1860 return;
1857 1861 }
1858 1862
1859 1863 DP_NOTICE(p_hwfn, false,
1860 1864 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1861 1865 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1862 1866 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1863 1867 }
1864 1868
1865 1869 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1866 1870 struct ecore_ptt *p_ptt)
1867 1871 {
1868 1872 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1869 1873 enum _ecore_status_t rc = ECORE_SUCCESS;
1870 1874 bool found = false;
1871 1875 u16 i;
1872 1876
1873 1877 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1874 1878
1875 1879 /* Read Messages from MFW */
1876 1880 ecore_mcp_read_mb(p_hwfn, p_ptt);
1877 1881
1878 1882 /* Compare current messages to old ones */
1879 1883 for (i = 0; i < info->mfw_mb_length; i++) {
1880 1884 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1881 1885 continue;
1882 1886
1883 1887 found = true;
1884 1888
1885 1889 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1886 1890 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1887 1891 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1888 1892
1889 1893 switch (i) {
1890 1894 case MFW_DRV_MSG_LINK_CHANGE:
1891 1895 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1892 1896 break;
1893 1897 case MFW_DRV_MSG_VF_DISABLED:
1894 1898 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1895 1899 break;
1896 1900 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1897 1901 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1898 1902 ECORE_DCBX_REMOTE_LLDP_MIB);
1899 1903 break;
1900 1904 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1901 1905 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1902 1906 ECORE_DCBX_REMOTE_MIB);
1903 1907 break;
1904 1908 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1905 1909 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1906 1910 ECORE_DCBX_OPERATIONAL_MIB);
1907 1911 break;
1908 1912 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1909 1913 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1910 1914 break;
1911 1915 case MFW_DRV_MSG_ERROR_RECOVERY:
1912 1916 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1913 1917 break;
1914 1918 case MFW_DRV_MSG_GET_LAN_STATS:
1915 1919 case MFW_DRV_MSG_GET_FCOE_STATS:
1916 1920 case MFW_DRV_MSG_GET_ISCSI_STATS:
1917 1921 case MFW_DRV_MSG_GET_RDMA_STATS:
1918 1922 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1919 1923 break;
1920 1924 case MFW_DRV_MSG_BW_UPDATE:
1921 1925 ecore_mcp_update_bw(p_hwfn, p_ptt);
1922 1926 break;
1923 1927 case MFW_DRV_MSG_S_TAG_UPDATE:
1924 1928 ecore_mcp_update_stag(p_hwfn, p_ptt);
1925 1929 break;
1926 1930 case MFW_DRV_MSG_FAILURE_DETECTED:
1927 1931 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1928 1932 break;
1929 1933 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1930 1934 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1931 1935 break;
1932 1936 case MFW_DRV_MSG_GET_TLV_REQ:
1933 1937 OSAL_MFW_TLV_REQ(p_hwfn);
1934 1938 break;
1935 1939 default:
1936 1940 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1937 1941 rc = ECORE_INVAL;
1938 1942 }
1939 1943 }
1940 1944
1941 1945 /* ACK everything */
1942 1946 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1943 1947 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1944 1948
1945 1949 		/* MFW expects the answer in BE, so we force the write in that format */
1946 1950 ecore_wr(p_hwfn, p_ptt,
1947 1951 info->mfw_mb_addr + sizeof(u32) +
1948 1952 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1949 1953 sizeof(u32) + i * sizeof(u32), val);
1950 1954 }
1951 1955
1952 1956 if (!found) {
1953 1957 DP_NOTICE(p_hwfn, false,
1954 1958 "Received an MFW message indication but no new message!\n");
1955 1959 rc = ECORE_INVAL;
1956 1960 }
1957 1961
1958 1962 /* Copy the new mfw messages into the shadow */
1959 1963 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1960 1964
1961 1965 return rc;
1962 1966 }
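The ACK write address above implies a mailbox section laid out as a length word followed by two parallel dword arrays; a hedged sketch of that layout (the offsets follow from the arithmetic, but the field names are guesses -- the real struct lives in mcp_public.h):

    /* N = MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length)
     *
     * mfw_mb_addr + 0        : u32 sup_msgs
     * mfw_mb_addr + 4        : u32 msg[N]  -- current messages (read_mb)
     * mfw_mb_addr + 4 + N*4  : u32 ack[N]  -- driver ACKs, written BE
     */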
1963 1967
1964 1968 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1965 1969 struct ecore_ptt *p_ptt,
1966 1970 u32 *p_mfw_ver,
1967 1971 u32 *p_running_bundle_id)
1968 1972 {
1969 1973 u32 global_offsize;
1970 1974
1971 1975 #ifndef ASIC_ONLY
1972 1976 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1973 1977 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1974 1978 return ECORE_SUCCESS;
1975 1979 }
1976 1980 #endif
1977 1981
1978 1982 if (IS_VF(p_hwfn->p_dev)) {
1979 1983 if (p_hwfn->vf_iov_info) {
1980 1984 struct pfvf_acquire_resp_tlv *p_resp;
1981 1985
1982 1986 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1983 1987 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1984 1988 return ECORE_SUCCESS;
1985 1989 } else {
1986 1990 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1987 1991 "VF requested MFW version prior to ACQUIRE\n");
1988 1992 return ECORE_INVAL;
1989 1993 }
1990 1994 }
1991 1995
1992 1996 global_offsize = ecore_rd(p_hwfn, p_ptt,
1993 1997 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1994 1998 PUBLIC_GLOBAL));
1995 1999 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
1996 2000 SECTION_ADDR(global_offsize, 0) +
1997 2001 OFFSETOF(struct public_global, mfw_ver));
1998 2002
1999 2003 if (p_running_bundle_id != OSAL_NULL) {
2000 2004 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2001 2005 SECTION_ADDR(global_offsize, 0) +
2002 2006 OFFSETOF(struct public_global,
2003 2007 running_bundle_id));
2004 2008 }
2005 2009
2006 2010 return ECORE_SUCCESS;
2007 2011 }
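Callers usually split the packed 32-bit MFW version into four byte-wide fields; a hedged sketch (the major/minor/rev/eng byte order is an assumption, not taken from this file):

    char buf[16];
    u32 v = *p_mfw_ver;

    (void) snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
        (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);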
2008 2012
2009 2013 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2010 2014 struct ecore_ptt *p_ptt,
2011 2015 u32 *p_mbi_ver)
2012 2016 {
2013 2017 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2014 2018
2015 2019 #ifndef ASIC_ONLY
2016 2020 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2017 2021 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2018 2022 return ECORE_SUCCESS;
2019 2023 }
2020 2024 #endif
2021 2025
2022 2026 if (IS_VF(p_hwfn->p_dev))
2023 2027 return ECORE_INVAL;
2024 2028
2025 2029 /* Read the address of the nvm_cfg */
2026 2030 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2027 2031 if (!nvm_cfg_addr) {
2028 2032 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2029 2033 return ECORE_INVAL;
2030 2034 }
2031 2035
2032 2036 /* Read the offset of nvm_cfg1 */
2033 2037 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2034 2038
2035 2039 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2036 2040 OFFSETOF(struct nvm_cfg1, glob) +
2037 2041 OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2038 2042 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2039 2043 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2040 2044 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2041 2045 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2042 2046
2043 2047 return ECORE_SUCCESS;
2044 2048 }
2045 2049
2046 2050 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2047 2051 u32 *p_media_type)
2048 2052 {
2049 2053 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2050 2054 struct ecore_ptt *p_ptt;
2051 2055
2052 2056 /* TODO - Add support for VFs */
2053 2057 if (IS_VF(p_dev))
2054 2058 return ECORE_INVAL;
2055 2059
2056 2060 if (!ecore_mcp_is_init(p_hwfn)) {
2057 2061 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2058 2062 return ECORE_BUSY;
2059 2063 }
2060 2064
2061 2065 *p_media_type = MEDIA_UNSPECIFIED;
2062 2066
2063 2067 p_ptt = ecore_ptt_acquire(p_hwfn);
2064 2068 if (!p_ptt)
2065 2069 return ECORE_BUSY;
2066 2070
2067 2071 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2068 2072 OFFSETOF(struct public_port, media_type));
2069 2073
2070 2074 ecore_ptt_release(p_hwfn, p_ptt);
2071 2075
2072 2076 return ECORE_SUCCESS;
2073 2077 }
2074 2078
2075 2079 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2076 2080 static void
2077 2081 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2078 2082 enum ecore_pci_personality *p_proto)
2079 2083 {
2080 2084 /* There wasn't ever a legacy MFW that published iwarp.
2081 2085 * So at this point, this is either plain l2 or RoCE.
2082 2086 */
2083 2087 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2084 2088 &p_hwfn->hw_info.device_capabilities))
2085 2089 *p_proto = ECORE_PCI_ETH_ROCE;
2086 2090 else
2087 2091 *p_proto = ECORE_PCI_ETH;
2088 2092
2089 2093 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2090 2094 "According to Legacy capabilities, L2 personality is %08x\n",
2091 2095 (u32) *p_proto);
2092 2096 }
2093 2097
2094 2098 static enum _ecore_status_t
2095 2099 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2096 2100 struct ecore_ptt *p_ptt,
2097 2101 enum ecore_pci_personality *p_proto)
2098 2102 {
2099 2103 u32 resp = 0, param = 0;
2100 2104 enum _ecore_status_t rc;
2101 2105
2102 2106 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2103 2107 			   DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2104 2108 if (rc != ECORE_SUCCESS)
2105 2109 return rc;
2106 2110 if (resp != FW_MSG_CODE_OK) {
2107 2111 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2108 2112 "MFW lacks support for command; Returns %08x\n",
2109 2113 resp);
2110 2114 return ECORE_INVAL;
2111 2115 }
2112 2116
2113 2117 switch (param) {
2114 2118 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2115 2119 *p_proto = ECORE_PCI_ETH;
2116 2120 break;
2117 2121 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2118 2122 *p_proto = ECORE_PCI_ETH_ROCE;
2119 2123 break;
2120 2124 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2121 2125 *p_proto = ECORE_PCI_ETH_IWARP;
2122 2126 break;
2123 2127 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2124 2128 *p_proto = ECORE_PCI_ETH_RDMA;
2125 2129 break;
2126 2130 default:
2127 2131 DP_NOTICE(p_hwfn, true,
2128 2132 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2129 2133 param);
2130 2134 return ECORE_INVAL;
2131 2135 }
2132 2136
2133 2137 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2134 2138 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2135 2139 (u32) *p_proto, resp, param);
2136 2140 return ECORE_SUCCESS;
2137 2141 }
2138 2142
2139 2143 static enum _ecore_status_t
2140 2144 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2141 2145 struct public_func *p_info,
2142 2146 struct ecore_ptt *p_ptt,
2143 2147 enum ecore_pci_personality *p_proto)
2144 2148 {
2145 2149 enum _ecore_status_t rc = ECORE_SUCCESS;
2146 2150
2147 2151 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2148 2152 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2149 2153 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2150 2154 ECORE_SUCCESS)
2151 2155 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2152 2156 break;
2153 2157 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2154 2158 *p_proto = ECORE_PCI_ISCSI;
2155 2159 break;
2156 2160 case FUNC_MF_CFG_PROTOCOL_FCOE:
2157 2161 *p_proto = ECORE_PCI_FCOE;
2158 2162 break;
2159 2163 case FUNC_MF_CFG_PROTOCOL_ROCE:
2160 2164 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2161 2165 rc = ECORE_INVAL;
2162 2166 break;
2163 2167 default:
2164 2168 rc = ECORE_INVAL;
2165 2169 }
2166 2170
2167 2171 return rc;
2168 2172 }
2169 2173
2170 2174 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2171 2175 struct ecore_ptt *p_ptt)
2172 2176 {
2173 2177 struct ecore_mcp_function_info *info;
2174 2178 struct public_func shmem_info;
2175 2179
2176 2180 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2177 2181 MCP_PF_ID(p_hwfn));
2178 2182 info = &p_hwfn->mcp_info->func_info;
2179 2183
2180 2184 info->pause_on_host = (shmem_info.config &
2181 2185 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2182 2186
2183 2187 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2184 2188 &info->protocol)) {
2185 2189 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2186 2190 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2187 2191 return ECORE_INVAL;
2188 2192 }
2189 2193
2190 2194 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2191 2195
2192 2196 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2193 2197 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2194 2198 info->mac[1] = (u8)(shmem_info.mac_upper);
2195 2199 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2196 2200 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2197 2201 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2198 2202 info->mac[5] = (u8)(shmem_info.mac_lower);
2199 2203
2200 2204 /* Store primary MAC for later possible WoL */
2201 2205 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2202 2206
2203 2207 } else {
2204 2208 /* TODO - are there protocols for which there's no MAC? */
2205 2209 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2206 2210 }
2207 2211
2208 2212 	/* TODO - are these calculations correct for a BE machine? */
2209 2213 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2210 2214 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2211 2215 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2212 2216 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2213 2217
2214 2218 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2215 2219
2216 2220 info->mtu = (u16)shmem_info.mtu_size;
2217 2221
2218 2222 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2219 2223 if (ecore_mcp_is_init(p_hwfn)) {
2220 2224 u32 resp = 0, param = 0;
2221 2225 enum _ecore_status_t rc;
2222 2226
2223 2227 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2224 2228 				   DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2225 2229 if (rc != ECORE_SUCCESS)
2226 2230 return rc;
2227 2231 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2228 2232 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2229 2233 }
2230 2234 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2231 2235
2232 2236 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2233 2237 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2234 2238 info->pause_on_host, info->protocol,
2235 2239 info->bandwidth_min, info->bandwidth_max,
2236 2240 info->mac[0], info->mac[1], info->mac[2],
2237 2241 info->mac[3], info->mac[4], info->mac[5],
2238 2242 info->wwn_port, info->wwn_node, info->ovlan,
2239 2243 (u8)p_hwfn->hw_info.b_wol_support);
2240 2244
2241 2245 return ECORE_SUCCESS;
2242 2246 }
2243 2247
2244 2248 struct ecore_mcp_link_params
2245 2249 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2246 2250 {
2247 2251 if (!p_hwfn || !p_hwfn->mcp_info)
2248 2252 return OSAL_NULL;
2249 2253 return &p_hwfn->mcp_info->link_input;
2250 2254 }
2251 2255
2252 2256 struct ecore_mcp_link_state
2253 2257 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2254 2258 {
2255 2259 if (!p_hwfn || !p_hwfn->mcp_info)
2256 2260 return OSAL_NULL;
2257 2261
2258 2262 #ifndef ASIC_ONLY
2259 2263 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2260 2264 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2261 2265 p_hwfn->mcp_info->link_output.link_up = true;
2262 2266 }
2263 2267 #endif
2264 2268
2265 2269 return &p_hwfn->mcp_info->link_output;
2266 2270 }
2267 2271
2268 2272 struct ecore_mcp_link_capabilities
2269 2273 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2270 2274 {
2271 2275 if (!p_hwfn || !p_hwfn->mcp_info)
2272 2276 return OSAL_NULL;
2273 2277 return &p_hwfn->mcp_info->link_capabilities;
2274 2278 }
2275 2279
2276 2280 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2277 2281 struct ecore_ptt *p_ptt)
2278 2282 {
2279 2283 u32 resp = 0, param = 0;
2280 2284 enum _ecore_status_t rc;
2281 2285
2282 2286 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2283 2287 DRV_MSG_CODE_NIG_DRAIN, 1000,
2284 2288 			   &resp, &param);
2285 2289
2286 2290 /* Wait for the drain to complete before returning */
2287 2291 OSAL_MSLEEP(1020);
2288 2292
2289 2293 return rc;
2290 2294 }
2291 2295
2292 2296 #ifndef LINUX_REMOVE
2293 2297 const struct ecore_mcp_function_info
2294 2298 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2295 2299 {
2296 2300 if (!p_hwfn || !p_hwfn->mcp_info)
2297 2301 return OSAL_NULL;
2298 2302 return &p_hwfn->mcp_info->func_info;
2299 2303 }
2300 2304 #endif
2301 2305
2302 2306 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
2303 2307 struct ecore_ptt *p_ptt,
2304 2308 struct ecore_mcp_nvm_params *params)
2305 2309 {
2306 2310 enum _ecore_status_t rc;
2307 2311
2308 2312 switch (params->type) {
2309 2313 case ECORE_MCP_NVM_RD:
2310 2314 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2311 2315 params->nvm_common.offset,
2312 2316 					  &params->nvm_common.resp,
2313 2317 					  &params->nvm_common.param,
2314 2318 params->nvm_rd.buf_size,
2315 2319 params->nvm_rd.buf);
2316 2320 break;
2317 2321 case ECORE_MCP_CMD:
2318 2322 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2319 2323 params->nvm_common.offset,
2320 2324 				   &params->nvm_common.resp,
2321 2325 				   &params->nvm_common.param);
2322 2326 break;
2323 2327 case ECORE_MCP_NVM_WR:
2324 2328 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2325 2329 params->nvm_common.offset,
2326 2330 					  &params->nvm_common.resp,
2327 2331 					  &params->nvm_common.param,
2328 2332 params->nvm_wr.buf_size,
2329 2333 params->nvm_wr.buf);
2330 2334 break;
2331 2335 default:
2332 2336 rc = ECORE_NOTIMPL;
2333 2337 break;
2334 2338 }
2335 2339 return rc;
2336 2340 }
2337 2341
2338 2342 #ifndef LINUX_REMOVE
2339 2343 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2340 2344 struct ecore_ptt *p_ptt,
2341 2345 u32 personalities)
2342 2346 {
2343 2347 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2344 2348 struct public_func shmem_info;
2345 2349 int i, count = 0, num_pfs;
2346 2350
2347 2351 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2348 2352
2349 2353 for (i = 0; i < num_pfs; i++) {
2350 2354 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2351 2355 MCP_PF_ID_BY_REL(p_hwfn, i));
2352 2356 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2353 2357 continue;
2354 2358
2355 2359 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2356 2360 &protocol) !=
2357 2361 ECORE_SUCCESS)
2358 2362 continue;
2359 2363
2360 2364 if ((1 << ((u32)protocol)) & personalities)
2361 2365 count++;
2362 2366 }
2363 2367
2364 2368 return count;
2365 2369 }
2366 2370 #endif
2367 2371
2368 2372 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2369 2373 struct ecore_ptt *p_ptt,
2370 2374 u32 *p_flash_size)
2371 2375 {
2372 2376 u32 flash_size;
2373 2377
2374 2378 #ifndef ASIC_ONLY
2375 2379 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2376 2380 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2377 2381 return ECORE_INVAL;
2378 2382 }
2379 2383 #endif
2380 2384
2381 2385 if (IS_VF(p_hwfn->p_dev))
2382 2386 return ECORE_INVAL;
2383 2387
2384 2388 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2385 2389 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2386 2390 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2387 2391 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2388 2392
2389 2393 *p_flash_size = flash_size;
2390 2394
2391 2395 return ECORE_SUCCESS;
2392 2396 }
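Worked arithmetic for the conversion above: MCP_BYTES_PER_MBIT_SHIFT is 17 (2^17 bytes = 1 Mbit), so a CFG4 field value of n yields 2^(n + 17) bytes:

    /* n = 6: 1 << 23 = 0x0800000 bytes =  8 MiB  ( 64 Mbit flash)
     * n = 7: 1 << 24 = 0x1000000 bytes = 16 MiB  (128 Mbit flash) */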
2393 2397
2394 2398 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2395 2399 struct ecore_ptt *p_ptt)
2396 2400 {
2397 2401 struct ecore_dev *p_dev = p_hwfn->p_dev;
2398 2402
2399 2403 if (p_dev->recov_in_prog) {
2400 2404 DP_NOTICE(p_hwfn, false,
2401 2405 "Avoid triggering a recovery since such a process is already in progress\n");
2402 2406 return ECORE_AGAIN;
2403 2407 }
2404 2408
2405 2409 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2406 2410 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2407 2411
2408 2412 return ECORE_SUCCESS;
2409 2413 }
2410 2414
2411 2415 static enum _ecore_status_t
2412 2416 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2413 2417 struct ecore_ptt *p_ptt,
2414 2418 u8 vf_id, u8 num)
2415 2419 {
2416 2420 u32 resp = 0, param = 0, rc_param = 0;
2417 2421 enum _ecore_status_t rc;
2418 2422
2419 2423 	/* Only the leader can configure MSI-X; CMT needs to be taken into account */
2420 2424 if (!IS_LEAD_HWFN(p_hwfn))
2421 2425 return ECORE_SUCCESS;
2422 2426 num *= p_hwfn->p_dev->num_hwfns;
2423 2427
2424 2428 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2425 2429 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2426 2430 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2427 2431 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2428 2432
2429 2433 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2430 2434 &resp, &rc_param);
2431 2435
2432 2436 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2433 2437 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2434 2438 vf_id);
2435 2439 rc = ECORE_INVAL;
2436 2440 } else {
2437 2441 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2438 2442 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2439 2443 num, vf_id);
2440 2444 }
2441 2445
2442 2446 return rc;
2443 2447 }
2444 2448
2445 2449 static enum _ecore_status_t
2446 2450 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2447 2451 struct ecore_ptt *p_ptt,
2448 2452 u8 num)
2449 2453 {
2450 2454 u32 resp = 0, param = num, rc_param = 0;
2451 2455 enum _ecore_status_t rc;
2452 2456
2453 2457 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2454 2458 param, &resp, &rc_param);
2455 2459
2456 2460 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2457 2461 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2458 2462 rc = ECORE_INVAL;
2459 2463 } else {
2460 2464 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2461 2465 "Requested 0x%02x MSI-x interrupts for VFs\n",
2462 2466 num);
2463 2467 }
2464 2468
2465 2469 return rc;
2466 2470 }
2467 2471
2468 2472 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2469 2473 struct ecore_ptt *p_ptt,
2470 2474 u8 vf_id, u8 num)
2471 2475 {
2472 2476 if (ECORE_IS_BB(p_hwfn->p_dev))
2473 2477 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2474 2478 else
2475 2479 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2476 2480 }
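A small worked example of the BB/CMT scaling above: since only the leading hwfn talks to the MFW, a request for 4 vectors on behalf of a VF on a two-hwfn device goes out doubled.

    /* ECORE_IS_BB && num_hwfns == 2:
     *   ecore_mcp_config_vf_msix(p_hwfn, p_ptt, 3, 4)
     *   -> the _bb path sends vf_id 3, num = 4 * 2 = 8 in the
     *      CFG_VF_MSIX mailbox param */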
2477 2481
2478 2482 enum _ecore_status_t
2479 2483 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2480 2484 struct ecore_mcp_drv_version *p_ver)
2481 2485 {
2482 2486 struct ecore_mcp_mb_params mb_params;
2483 2487 struct drv_version_stc drv_version;
2484 2488 u32 num_words, i;
2485 2489 void *p_name;
2486 2490 OSAL_BE32 val;
2487 2491 enum _ecore_status_t rc;
2488 2492
2489 2493 #ifndef ASIC_ONLY
2490 2494 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2491 2495 return ECORE_SUCCESS;
2492 2496 #endif
2493 2497
2494 2498 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2495 2499 drv_version.version = p_ver->version;
2496 2500 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2497 2501 for (i = 0; i < num_words; i++) {
2498 2502 /* The driver name is expected to be in a big-endian format */
2499 2503 p_name = &p_ver->name[i * sizeof(u32)];
2500 2504 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2501 2505 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2502 2506 }
2503 2507
2504 2508 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2505 2509 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2506 2510 mb_params.p_data_src = &drv_version;
2507 2511 mb_params.data_src_size = sizeof(drv_version);
2508 2512 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2509 2513 if (rc != ECORE_SUCCESS)
2510 2514 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2511 2515
2512 2516 return rc;
2513 2517 }
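Byte-order example for the name-copy loop above, by the same 32-bit-swap reasoning as the SET_VMAC comment further down in this file:

    /* name = "qede ...":  *(u32 *)p_name on an LE host = 0x65646571
     * OSAL_CPU_TO_BE32(0x65646571)                      = 0x71656465
     * -> the big-endian MFW reads 'q','e','d','e' in string order */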
2514 2518
2515 2519 /* A maximum waiting time of 100 msec for the MCP to halt */
2516 2520 #define ECORE_MCP_HALT_SLEEP_MS 10
2517 2521 #define ECORE_MCP_HALT_MAX_RETRIES 10
2518 2522
2519 2523 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2520 2524 struct ecore_ptt *p_ptt)
2521 2525 {
2522 2526 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2523 2527 enum _ecore_status_t rc;
2524 2528
2525 2529 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2526 2530 			   &param);
2527 2531 if (rc != ECORE_SUCCESS) {
2528 2532 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2529 2533 return rc;
2530 2534 }
2531 2535
2532 2536 do {
2533 2537 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2534 2538 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2535 2539 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2536 2540 break;
2537 2541 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2538 2542
2539 2543 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2540 2544 DP_NOTICE(p_hwfn, false,
2541 2545 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2542 2546 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2543 2547 return ECORE_BUSY;
2544 2548 }
2545 2549
2546 2550 return ECORE_SUCCESS;
2547 2551 }
2548 2552
2549 2553 #define ECORE_MCP_RESUME_SLEEP_MS 10
2550 2554
2551 2555 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2552 2556 struct ecore_ptt *p_ptt)
2553 2557 {
2554 2558 u32 cpu_mode, cpu_state;
2555 2559
2556 2560 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2557 2561
2558 2562 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2559 2563 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2560 2564 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2561 2565
2562 2566 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2563 2567 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2564 2568
2565 2569 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2566 2570 DP_NOTICE(p_hwfn, false,
2567 2571 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2568 2572 cpu_mode, cpu_state);
2569 2573 return ECORE_BUSY;
2570 2574 }
2571 2575
2572 2576 return ECORE_SUCCESS;
2573 2577 }
2574 2578
2575 2579 enum _ecore_status_t
2576 2580 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2577 2581 struct ecore_ptt *p_ptt,
2578 2582 enum ecore_ov_client client)
2579 2583 {
2580 2584 enum _ecore_status_t rc;
2581 2585 u32 resp = 0, param = 0;
2582 2586 u32 drv_mb_param;
2583 2587
2584 2588 switch (client) {
2585 2589 case ECORE_OV_CLIENT_DRV:
2586 2590 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2587 2591 break;
2588 2592 case ECORE_OV_CLIENT_USER:
2589 2593 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2590 2594 break;
2591 2595 case ECORE_OV_CLIENT_VENDOR_SPEC:
2592 2596 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2593 2597 break;
2594 2598 default:
2595 2599 DP_NOTICE(p_hwfn, true,
2596 2600 "Invalid client type %d\n", client);
2597 2601 return ECORE_INVAL;
2598 2602 }
2599 2603
2600 2604 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2601 2605 			   drv_mb_param, &resp, &param);
2602 2606 if (rc != ECORE_SUCCESS)
2603 2607 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2604 2608
2605 2609 return rc;
2606 2610 }
2607 2611
2608 2612 enum _ecore_status_t
2609 2613 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2610 2614 struct ecore_ptt *p_ptt,
2611 2615 enum ecore_ov_driver_state drv_state)
2612 2616 {
2613 2617 enum _ecore_status_t rc;
2614 2618 u32 resp = 0, param = 0;
2615 2619 u32 drv_mb_param;
2616 2620
2617 2621 switch (drv_state) {
2618 2622 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2619 2623 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2620 2624 break;
2621 2625 case ECORE_OV_DRIVER_STATE_DISABLED:
2622 2626 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2623 2627 break;
2624 2628 case ECORE_OV_DRIVER_STATE_ACTIVE:
2625 2629 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2626 2630 break;
2627 2631 default:
2628 2632 DP_NOTICE(p_hwfn, true,
2629 2633 "Invalid driver state %d\n", drv_state);
2630 2634 return ECORE_INVAL;
2631 2635 }
2632 2636
2633 2637 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2634 2638 			   drv_mb_param, &resp, &param);
2635 2639 if (rc != ECORE_SUCCESS)
2636 2640 DP_ERR(p_hwfn, "Failed to send driver state\n");
2637 2641
2638 2642 return rc;
2639 2643 }
2640 2644
2641 2645 enum _ecore_status_t
2642 2646 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2643 2647 struct ecore_fc_npiv_tbl *p_table)
2644 2648 {
2645 2649 enum _ecore_status_t rc = ECORE_SUCCESS;
2646 2650 struct dci_fc_npiv_tbl *p_npiv_table;
2647 2651 u8 *p_buf = OSAL_NULL;
2648 2652 u32 addr, size, i;
2649 2653
2650 2654 p_table->num_wwpn = 0;
2651 2655 p_table->num_wwnn = 0;
2652 2656 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2653 2657 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
2654 2658 if (addr == NPIV_TBL_INVALID_ADDR) {
2655 2659 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2656 2660 return rc;
2657 2661 }
2658 2662
2659 2663 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2660 2664 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
2661 2665 if (!size) {
2662 2666 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2663 2667 return rc;
2664 2668 }
2665 2669
2666 2670 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2667 2671 if (!p_buf) {
2668 2672 DP_ERR(p_hwfn, "Buffer allocation failed\n");
2669 2673 return ECORE_NOMEM;
2670 2674 }
2671 2675
2672 2676 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2673 2677 if (rc != ECORE_SUCCESS) {
2674 2678 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2675 2679 return rc;
2676 2680 }
2677 2681
2678 2682 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2679 2683 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2680 2684 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2681 2685 for (i = 0; i < p_table->num_wwpn; i++) {
2682 2686 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2683 2687 ECORE_WWN_SIZE);
2684 2688 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2685 2689 ECORE_WWN_SIZE);
2686 2690 }
2687 2691
2688 2692 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2689 2693
2690 2694 return ECORE_SUCCESS;
2691 2695 }
2692 2696
2693 2697 enum _ecore_status_t
2694 2698 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2695 2699 u16 mtu)
2696 2700 {
2697 2701 enum _ecore_status_t rc;
2698 2702 u32 resp = 0, param = 0;
2699 2703 u32 drv_mb_param;
2700 2704
2701 2705 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2702 2706 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2703 2707 			   drv_mb_param, &resp, &param);
2704 2708 if (rc != ECORE_SUCCESS)
2705 2709 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2706 2710
2707 2711 return rc;
2708 2712 }
2709 2713
2710 2714 enum _ecore_status_t
2711 2715 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2712 2716 u8 *mac)
2713 2717 {
2714 2718 struct ecore_mcp_mb_params mb_params;
2715 2719 enum _ecore_status_t rc;
2716 2720 u32 mfw_mac[2];
2717 2721
2718 2722 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2719 2723 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2720 2724 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2721 2725 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2722 2726 mb_params.param |= MCP_PF_ID(p_hwfn);
2723 2727
2724 2728 	/* MCP is BE, and on LE platforms PCI swaps accesses to SHMEM
2725 2729 	 * at 32-bit granularity.
2726 2730 	 * So the MAC has to be set in native order [and not byte order];
2727 2731 	 * otherwise the MFW would read it incorrectly after the swap.
2728 2732 */
2729 2733 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2730 2734 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2731 2735
2732 2736 mb_params.p_data_src = (u8 *)mfw_mac;
2733 2737 mb_params.data_src_size = 8;
2734 2738 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2735 2739 if (rc != ECORE_SUCCESS)
2736 2740 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2737 2741
2738 2742 /* Store primary MAC for later possible WoL */
2739 2743 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2740 2744
2741 2745 return rc;
2742 2746 }
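Worked example of the native-order packing above, for mac = 00:0e:1e:aa:bb:cc:

    /* mfw_mac[0] = 0x000e1eaa   (mac[0..3])
     * mfw_mac[1] = 0xbbcc0000   (mac[4..5] in the high bytes)
     * The values, not the host byte layout, are what the BE MCP
     * consumes, so no per-byte swizzling is needed on either side. */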
2743 2747
2744 2748 enum _ecore_status_t
2745 2749 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2746 2750 enum ecore_ov_wol wol)
2747 2751 {
2748 2752 enum _ecore_status_t rc;
2749 2753 u32 resp = 0, param = 0;
2750 2754 u32 drv_mb_param;
2751 2755
2752 2756 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2753 2757 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2754 2758 "Can't change WoL configuration when WoL isn't supported\n");
2755 2759 return ECORE_INVAL;
2756 2760 }
2757 2761
2758 2762 switch (wol) {
2759 2763 case ECORE_OV_WOL_DEFAULT:
2760 2764 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2761 2765 break;
2762 2766 case ECORE_OV_WOL_DISABLED:
2763 2767 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2764 2768 break;
2765 2769 case ECORE_OV_WOL_ENABLED:
2766 2770 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2767 2771 break;
2768 2772 default:
2769 2773 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2770 2774 return ECORE_INVAL;
2771 2775 }
2772 2776
2773 2777 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2774 2778 			   drv_mb_param, &resp, &param);
2775 2779 if (rc != ECORE_SUCCESS)
2776 2780 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2777 2781
2778 2782 /* Store the WoL update for a future unload */
2779 2783 p_hwfn->p_dev->wol_config = (u8)wol;
2780 2784
2781 2785 return rc;
2782 2786 }
2783 2787
2784 2788 enum _ecore_status_t
2785 2789 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2786 2790 enum ecore_ov_eswitch eswitch)
2787 2791 {
2788 2792 enum _ecore_status_t rc;
2789 2793 u32 resp = 0, param = 0;
2790 2794 u32 drv_mb_param;
2791 2795
2792 2796 switch (eswitch) {
2793 2797 case ECORE_OV_ESWITCH_NONE:
2794 2798 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2795 2799 break;
2796 2800 case ECORE_OV_ESWITCH_VEB:
2797 2801 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2798 2802 break;
2799 2803 case ECORE_OV_ESWITCH_VEPA:
2800 2804 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2801 2805 break;
2802 2806 default:
2803 2807 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2804 2808 return ECORE_INVAL;
2805 2809 }
2806 2810
2807 2811 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2808 2812 			   drv_mb_param, &resp, &param);
2809 2813 if (rc != ECORE_SUCCESS)
2810 2814 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2811 2815
2812 2816 return rc;
2813 2817 }
2814 2818
2815 2819 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2816 2820 struct ecore_ptt *p_ptt,
2817 2821 enum ecore_led_mode mode)
2818 2822 {
2819 2823 u32 resp = 0, param = 0, drv_mb_param;
2820 2824 enum _ecore_status_t rc;
2821 2825
2822 2826 switch (mode) {
2823 2827 case ECORE_LED_MODE_ON:
2824 2828 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2825 2829 break;
2826 2830 case ECORE_LED_MODE_OFF:
2827 2831 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2828 2832 break;
2829 2833 case ECORE_LED_MODE_RESTORE:
2830 2834 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2831 2835 break;
2832 2836 default:
2833 2837 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2834 2838 return ECORE_INVAL;
2835 2839 }
2836 2840
2837 2841 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2838 2842 			   drv_mb_param, &resp, &param);
2839 2843 if (rc != ECORE_SUCCESS)
2840 2844 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2841 2845
2842 2846 return rc;
2843 2847 }
2844 2848
2845 2849 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2846 2850 struct ecore_ptt *p_ptt,
2847 2851 u32 mask_parities)
2848 2852 {
2849 2853 enum _ecore_status_t rc;
2850 2854 u32 resp = 0, param = 0;
2851 2855
2852 2856 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2853 2857 			   mask_parities, &resp, &param);
2854 2858
2855 2859 if (rc != ECORE_SUCCESS) {
2856 2860 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2857 2861 } else if (resp != FW_MSG_CODE_OK) {
2858 2862 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2859 2863 rc = ECORE_INVAL;
2860 2864 }
2861 2865
2862 2866 return rc;
2863 2867 }
2864 2868
2865 2869 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2866 2870 u8 *p_buf, u32 len)
2867 2871 {
2868 2872 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2869 2873 u32 bytes_left, offset, bytes_to_copy, buf_size;
2870 2874 struct ecore_mcp_nvm_params params;
2871 2875 struct ecore_ptt *p_ptt;
2872 2876 enum _ecore_status_t rc = ECORE_SUCCESS;
2873 2877
2874 2878 p_ptt = ecore_ptt_acquire(p_hwfn);
2875 2879 if (!p_ptt)
2876 2880 return ECORE_BUSY;
2877 2881
2878 2882 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2879 2883 bytes_left = len;
2880 2884 offset = 0;
2881 2885 params.type = ECORE_MCP_NVM_RD;
2882 2886 params.nvm_rd.buf_size = &buf_size;
2883 2887 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
2884 2888 while (bytes_left > 0) {
2885 2889 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2886 2890 MCP_DRV_NVM_BUF_LEN);
2887 2891 params.nvm_common.offset = (addr + offset) |
2888 2892 (bytes_to_copy <<
2889 2893 DRV_MB_PARAM_NVM_LEN_SHIFT);
2890 2894 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2891 2895 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2892 2896 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2893 2897 FW_MSG_CODE_NVM_OK)) {
2894 2898 DP_NOTICE(p_dev, false, "MCP command rc = %d\n",
2895 2899 rc);
2896 2900 break;
2897 2901 }
2898 2902
2899 2903 		/* This can be a lengthy process, and it's possible the
2900 2904 		 * scheduler isn't preemptible. Sleep a bit to prevent CPU hogging.
2901 2905 */
2902 2906 if (bytes_left % 0x1000 <
2903 2907 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2904 2908 OSAL_MSLEEP(1);
2905 2909
2906 2910 offset += *params.nvm_rd.buf_size;
2907 2911 bytes_left -= *params.nvm_rd.buf_size;
2908 2912 }
2909 2913
2910 2914 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2911 2915 ecore_ptt_release(p_hwfn, p_ptt);
2912 2916
2913 2917 return rc;
2914 2918 }
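The loop above packs both the NVM address and the chunk length into the single mailbox param; a sketch assuming DRV_MB_PARAM_NVM_LEN_SHIFT is 24 (the real value lives in mcp_public.h):

    /* first chunk of a read at addr 0x2000, bytes_to_copy = 0x80: */
    u32 param = (0x2000 + 0) | (0x80u << 24);    /* 0x80002000 */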
2915 2919
2916 2920 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2917 2921 u32 addr, u8 *p_buf, u32 len)
2918 2922 {
2919 2923 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2920 2924 struct ecore_mcp_nvm_params params;
2921 2925 struct ecore_ptt *p_ptt;
2922 2926 enum _ecore_status_t rc;
2923 2927
2924 2928 p_ptt = ecore_ptt_acquire(p_hwfn);
2925 2929 if (!p_ptt)
2926 2930 return ECORE_BUSY;
2927 2931
2928 2932 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2929 2933 params.type = ECORE_MCP_NVM_RD;
2930 2934 params.nvm_rd.buf_size = &len;
2931 2935 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2932 2936 DRV_MSG_CODE_PHY_CORE_READ :
2933 2937 DRV_MSG_CODE_PHY_RAW_READ;
2934 2938 params.nvm_common.offset = addr;
2935 2939 params.nvm_rd.buf = (u32 *)p_buf;
2936 2940 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2937 2941 if (rc != ECORE_SUCCESS)
2938 2942 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2939 2943
2940 2944 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2941 2945 ecore_ptt_release(p_hwfn, p_ptt);
2942 2946
2943 2947 return rc;
2944 2948 }
2945 2949
2946 2950 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2947 2951 {
2948 2952 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2949 2953 struct ecore_mcp_nvm_params params;
2950 2954 struct ecore_ptt *p_ptt;
2951 2955
2952 2956 p_ptt = ecore_ptt_acquire(p_hwfn);
2953 2957 if (!p_ptt)
2954 2958 return ECORE_BUSY;
2955 2959
2956 2960 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2957 2961 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2958 2962 ecore_ptt_release(p_hwfn, p_ptt);
2959 2963
2960 2964 return ECORE_SUCCESS;
2961 2965 }
2962 2966
2963 2967 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
2964 2968 u32 addr)
2965 2969 {
2966 2970 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2967 2971 struct ecore_mcp_nvm_params params;
2968 2972 struct ecore_ptt *p_ptt;
2969 2973 enum _ecore_status_t rc;
2970 2974
2971 2975 p_ptt = ecore_ptt_acquire(p_hwfn);
2972 2976 if (!p_ptt)
2973 2977 return ECORE_BUSY;
2974 2978 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2975 2979 params.type = ECORE_MCP_CMD;
2976 2980 params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2977 2981 params.nvm_common.offset = addr;
2978 2982 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2979 2983 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2980 2984 ecore_ptt_release(p_hwfn, p_ptt);
2981 2985
2982 2986 return rc;
2983 2987 }
2984 2988
2985 2989 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2986 2990 u32 addr)
2987 2991 {
2988 2992 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2989 2993 struct ecore_mcp_nvm_params params;
2990 2994 struct ecore_ptt *p_ptt;
2991 2995 enum _ecore_status_t rc;
2992 2996
2993 2997 p_ptt = ecore_ptt_acquire(p_hwfn);
2994 2998 if (!p_ptt)
2995 2999 return ECORE_BUSY;
2996 3000 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2997 3001 params.type = ECORE_MCP_CMD;
2998 3002 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2999 3003 params.nvm_common.offset = addr;
3000 3004 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3001 3005 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3002 3006 ecore_ptt_release(p_hwfn, p_ptt);
3003 3007
3004 3008 return rc;
3005 3009 }
3006 3010
3007 3011 /* rc receives ECORE_INVAL as a default value because the while
3008 3012  * loop might not be entered if len is 0
3009 3013 */
3010 3014 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3011 3015 u32 addr, u8 *p_buf, u32 len)
3012 3016 {
3013 3017 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3014 3018 enum _ecore_status_t rc = ECORE_INVAL;
3015 3019 struct ecore_mcp_nvm_params params;
3016 3020 struct ecore_ptt *p_ptt;
3017 3021 u32 buf_idx, buf_size;
3018 3022
3019 3023 p_ptt = ecore_ptt_acquire(p_hwfn);
3020 3024 if (!p_ptt)
3021 3025 return ECORE_BUSY;
3022 3026
3023 3027 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3024 3028 params.type = ECORE_MCP_NVM_WR;
3025 3029 switch (cmd) {
3026 3030 case ECORE_PUT_FILE_DATA:
3027 3031 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3028 3032 break;
3029 3033 case ECORE_NVM_WRITE_NVRAM:
3030 3034 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3031 3035 break;
3032 3036 case ECORE_EXT_PHY_FW_UPGRADE:
3033 3037 params.nvm_common.cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3034 3038 break;
3035 3039 default:
3036 3040 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3037 3041 cmd);
3038 3042 return ECORE_INVAL;
3039 3043 }
3040 3044
3041 3045 buf_idx = 0;
3042 3046 while (buf_idx < len) {
3043 3047 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3044 3048 MCP_DRV_NVM_BUF_LEN);
3045 3049 params.nvm_common.offset = ((buf_size <<
3046 3050 DRV_MB_PARAM_NVM_LEN_SHIFT)
3047 3051 | addr) + buf_idx;
3048 3052 params.nvm_wr.buf_size = buf_size;
3049 3053 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3050 3054 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3051 3055 if (rc != ECORE_SUCCESS ||
3052 3056 ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
3053 3057 (params.nvm_common.resp !=
3054 3058 FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
3055 3059 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3056 3060
3057 3061 		/* This can be a lengthy process, and it's possible the
3058 3062 		 * scheduler isn't preemptible. Sleep a bit to prevent CPU hogging.
3059 3063 */
3060 3064 if (buf_idx % 0x1000 >
3061 3065 (buf_idx + buf_size) % 0x1000)
3062 3066 OSAL_MSLEEP(1);
3063 3067
3064 3068 buf_idx += buf_size;
3065 3069 }
3066 3070
3067 3071 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3068 3072 ecore_ptt_release(p_hwfn, p_ptt);
3069 3073
3070 3074 return rc;
3071 3075 }
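
The write loop above defaults rc to ECORE_INVAL so a zero-length request fails cleanly (per the comment), packs each chunk's size into the upper bits of the offset parameter, and yields the CPU whenever a chunk reaches or crosses a 4 KiB boundary. A standalone sketch of that boundary test, with made-up chunk values (MCP_DRV_NVM_BUF_LEN is far smaller than 0x1000):

#include <stdio.h>

/*
 * True when [buf_idx, buf_idx + buf_size) reaches or crosses a 0x1000
 * boundary -- the condition under which ecore_mcp_nvm_write() sleeps.
 */
static int crosses_4k(unsigned buf_idx, unsigned buf_size)
{
	return ((buf_idx % 0x1000) > ((buf_idx + buf_size) % 0x1000));
}

int main(void)
{
	unsigned idx;

	/* Hypothetical 8-byte chunks marching over the 0x1000 boundary. */
	for (idx = 0xff0; idx <= 0x1010; idx += 8)
		(void) printf("idx=0x%04x sleep=%d\n", idx,
		    crosses_4k(idx, 8));
	return (0);
}

Only the chunk that touches the boundary triggers the sleep, so the 1 ms pause happens roughly once per 4 KiB written.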
3072 3076
3073 3077 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3074 3078 u32 addr, u8 *p_buf, u32 len)
3075 3079 {
3076 3080 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3077 3081 struct ecore_mcp_nvm_params params;
3078 3082 struct ecore_ptt *p_ptt;
3079 3083 enum _ecore_status_t rc;
3080 3084
3081 3085 p_ptt = ecore_ptt_acquire(p_hwfn);
3082 3086 if (!p_ptt)
3083 3087 return ECORE_BUSY;
3084 3088
3085 3089 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3086 3090 params.type = ECORE_MCP_NVM_WR;
3087 3091 params.nvm_wr.buf_size = len;
3088 3092 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
3089 3093 DRV_MSG_CODE_PHY_CORE_WRITE :
3090 3094 DRV_MSG_CODE_PHY_RAW_WRITE;
3091 3095 params.nvm_common.offset = addr;
3092 3096 params.nvm_wr.buf = (u32 *)p_buf;
3093 3097 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3094 3098 if (rc != ECORE_SUCCESS)
3095 3099 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3096 3100 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3097 3101 ecore_ptt_release(p_hwfn, p_ptt);
3098 3102
3099 3103 return rc;
3100 3104 }
3101 3105
3102 3106 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3103 3107 u32 addr)
3104 3108 {
3105 3109 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3106 3110 struct ecore_mcp_nvm_params params;
3107 3111 struct ecore_ptt *p_ptt;
3108 3112 enum _ecore_status_t rc;
3109 3113
3110 3114 p_ptt = ecore_ptt_acquire(p_hwfn);
3111 3115 if (!p_ptt)
3112 3116 return ECORE_BUSY;
3113 3117
3114 3118 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3115 3119 params.type = ECORE_MCP_CMD;
3116 3120 params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
3117 3121 params.nvm_common.offset = addr;
3118 3122 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3119 3123 p_dev->mcp_nvm_resp = params.nvm_common.resp;
3120 3124 ecore_ptt_release(p_hwfn, p_ptt);
3121 3125
3122 3126 return rc;
3123 3127 }
3124 3128
3125 3129 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3126 3130 struct ecore_ptt *p_ptt,
3127 3131 u32 port, u32 addr, u32 offset,
3128 3132 u32 len, u8 *p_buf)
3129 3133 {
3130 3134 struct ecore_mcp_nvm_params params;
3131 - enum _ecore_status_t rc;
3132 3135 u32 bytes_left, bytes_to_copy, buf_size;
3133 3136
3134 3137 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3135 3138 params.nvm_common.offset =
3136 3139 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3137 3140 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3138 3141 addr = offset;
3139 3142 offset = 0;
3140 3143 bytes_left = len;
3141 3144 params.type = ECORE_MCP_NVM_RD;
3142 3145 params.nvm_rd.buf_size = &buf_size;
3143 3146 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3144 3147 while (bytes_left > 0) {
3145 3148 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3146 3149 MAX_I2C_TRANSACTION_SIZE);
3147 3150 params.nvm_rd.buf = (u32 *)(p_buf + offset);
3148 3151 params.nvm_common.offset &=
3149 3152 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3150 3153 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3151 3154 params.nvm_common.offset |=
3152 3155 ((addr + offset) <<
3153 3156 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3154 3157 params.nvm_common.offset |=
3155 3158 (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3156 - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3159 + (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3157 3160 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3158 3161 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3159 3162 return ECORE_NODEV;
3160 3163 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3161 3164 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3162 3165 return ECORE_UNKNOWN_ERROR;
3163 3166
3164 3167 offset += *params.nvm_rd.buf_size;
3165 3168 bytes_left -= *params.nvm_rd.buf_size;
3166 3169 }
3167 3170
3168 3171 return ECORE_SUCCESS;
3169 3172 }
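
The SFP read loop above never requests more than MAX_I2C_TRANSACTION_SIZE bytes per mailbox command and advances by the byte count the firmware actually reported in *buf_size. A minimal sketch of that chunking pattern, with a hypothetical 16-byte limit and a fake backend standing in for ecore_mcp_nvm_command():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_LIMIT 16 /* hypothetical stand-in for MAX_I2C_TRANSACTION_SIZE */

/*
 * Fake backend standing in for the mailbox command; it may return
 * fewer bytes than requested, like a short I2C transaction.
 */
static uint32_t backend_read(uint32_t off, uint8_t *dst, uint32_t want)
{
	uint32_t got = (want > 5) ? 5 : want;

	(void) memset(dst, (int)(off & 0xff), got);
	return (got);
}

int main(void)
{
	uint8_t buf[64];
	uint32_t bytes_left = sizeof (buf), offset = 0;

	while (bytes_left > 0) {
		uint32_t want = (bytes_left < CHUNK_LIMIT) ?
		    bytes_left : CHUNK_LIMIT;
		uint32_t got = backend_read(offset, buf + offset, want);

		offset += got;
		bytes_left -= got;
	}
	(void) printf("read %u bytes\n", (unsigned)offset);
	return (0);
}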
3170 3173
3171 3174 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3172 3175 struct ecore_ptt *p_ptt,
3173 3176 u32 port, u32 addr, u32 offset,
3174 3177 u32 len, u8 *p_buf)
3175 3178 {
3176 3179 struct ecore_mcp_nvm_params params;
3177 - enum _ecore_status_t rc;
3178 3180 u32 buf_idx, buf_size;
3179 3181
3180 3182 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3181 3183 params.nvm_common.offset =
3182 3184 (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3183 3185 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3184 3186 params.type = ECORE_MCP_NVM_WR;
3185 3187 params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3186 3188 buf_idx = 0;
3187 3189 while (buf_idx < len) {
3188 3190 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3189 3191 MAX_I2C_TRANSACTION_SIZE);
3190 3192 params.nvm_common.offset &=
3191 3193 (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3192 3194 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3193 3195 params.nvm_common.offset |=
3194 3196 ((offset + buf_idx) <<
3195 3197 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3196 3198 params.nvm_common.offset |=
3197 3199 (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3198 3200 params.nvm_wr.buf_size = buf_size;
3199 3201 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3200 - rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3202 + (void) ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3201 3203 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3202 3204 FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3203 3205 return ECORE_NODEV;
3204 3206 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3205 3207 FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3206 3208 return ECORE_UNKNOWN_ERROR;
3207 3209
3208 3210 buf_idx += buf_size;
3209 3211 }
3210 3212
3211 3213 return ECORE_SUCCESS;
3212 3214 }
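
Both SFP helpers now drop the dead rc local and discard the ecore_mcp_nvm_command() return value with a (void) cast, since the loops judge success from params.nvm_common.resp instead. That is the pattern this webrev applies for newer GCC, which warns when a variable is assigned but never read. An illustration of that warning class (not driver code; compile with gcc -Wunused-but-set-variable):

int do_command(void)
{
	return (0);
}

void before(void)
{
	int rc;

	rc = do_command();	/* warning: variable 'rc' set but not used */
}

void after(void)
{
	(void) do_command();	/* result deliberately ignored; callers */
				/* check the response word instead */
}

int main(void)
{
	before();
	after();
	return (0);
}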
3213 3215
3214 3216 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3215 3217 struct ecore_ptt *p_ptt,
3216 3218 u16 gpio, u32 *gpio_val)
3217 3219 {
3218 3220 enum _ecore_status_t rc = ECORE_SUCCESS;
3219 3221 u32 drv_mb_param = 0, rsp;
3220 3222
3221 3223 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
3222 3224
3223 3225 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3224 3226 drv_mb_param, &rsp, gpio_val);
3225 3227
3226 3228 if (rc != ECORE_SUCCESS)
3227 3229 return rc;
3228 3230
3229 3231 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3230 3232 return ECORE_UNKNOWN_ERROR;
3231 3233
3232 3234 return ECORE_SUCCESS;
3233 3235 }
3234 3236
3235 3237 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3236 3238 struct ecore_ptt *p_ptt,
3237 3239 u16 gpio, u16 gpio_val)
3238 3240 {
3239 3241 enum _ecore_status_t rc = ECORE_SUCCESS;
3240 3242 u32 drv_mb_param = 0, param, rsp;
3241 3243
3242 3244 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
3243 3245 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
3244 3246
3245 3247 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3246 3248 drv_mb_param, &rsp, &param);
3247 3249
3248 3250 if (rc != ECORE_SUCCESS)
3249 3251 return rc;
3250 3252
3251 3253 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3252 3254 return ECORE_UNKNOWN_ERROR;
3253 3255
3254 3256 return ECORE_SUCCESS;
3255 3257 }
3256 3258
3257 3259 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3258 3260 struct ecore_ptt *p_ptt,
3259 3261 u16 gpio, u32 *gpio_direction,
3260 3262 u32 *gpio_ctrl)
3261 3263 {
3262 3264 u32 drv_mb_param = 0, rsp, val = 0;
3263 3265 enum _ecore_status_t rc = ECORE_SUCCESS;
3264 3266
3265 3267 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
3266 3268
3267 3269 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3268 3270 drv_mb_param, &rsp, &val);
3269 3271 if (rc != ECORE_SUCCESS)
3270 3272 return rc;
3271 3273
3272 3274 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3273 3275 DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
3274 3276 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3275 3277 DRV_MB_PARAM_GPIO_CTRL_SHIFT;
3276 3278
3277 3279 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3278 3280 return ECORE_UNKNOWN_ERROR;
3279 3281
3280 3282 return ECORE_SUCCESS;
3281 3283 }
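
ecore_mcp_gpio_info() decodes the direction and control fields from the returned value with the usual mask-and-shift idiom, the same pattern the temperature helper below applies to each packed sensor word. A tiny sketch with a made-up field layout (the real masks and shifts come from the shared MFW headers):

#include <stdint.h>
#include <stdio.h>

#define F_DIR_MASK	0x00000003	/* hypothetical layout */
#define F_DIR_SHIFT	0
#define F_CTRL_MASK	0x0000000c	/* hypothetical layout */
#define F_CTRL_SHIFT	2

int main(void)
{
	uint32_t val = 0x6;	/* dir = 2, ctrl = 1 under this layout */
	uint32_t dir = (val & F_DIR_MASK) >> F_DIR_SHIFT;
	uint32_t ctrl = (val & F_CTRL_MASK) >> F_CTRL_SHIFT;

	(void) printf("dir=%u ctrl=%u\n", (unsigned)dir, (unsigned)ctrl);
	return (0);
}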
3282 3284
3283 3285 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3284 3286 struct ecore_ptt *p_ptt)
3285 3287 {
3286 3288 u32 drv_mb_param = 0, rsp, param;
3287 3289 enum _ecore_status_t rc = ECORE_SUCCESS;
3288 3290
3289 3291 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3290 3292 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3291 3293
3292 3294 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3293 3295 drv_mb_param, &rsp, &param);
3294 3296
3295 3297 if (rc != ECORE_SUCCESS)
3296 3298 return rc;
3297 3299
3298 3300 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3299 3301 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3300 3302 rc = ECORE_UNKNOWN_ERROR;
3301 3303
3302 3304 return rc;
3303 3305 }
3304 3306
3305 3307 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3306 3308 struct ecore_ptt *p_ptt)
3307 3309 {
3308 3310 u32 drv_mb_param, rsp, param;
3309 3311 enum _ecore_status_t rc = ECORE_SUCCESS;
3310 3312
3311 3313 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3312 3314 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3313 3315
3314 3316 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3315 3317 drv_mb_param, &rsp, &param);
3316 3318
3317 3319 if (rc != ECORE_SUCCESS)
3318 3320 return rc;
3319 3321
3320 3322 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3321 3323 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3322 3324 rc = ECORE_UNKNOWN_ERROR;
3323 3325
3324 3326 return rc;
3325 3327 }
3326 3328
3327 3329 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3328 3330 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3329 3331 {
3330 3332 u32 drv_mb_param = 0, rsp;
3331 3333 enum _ecore_status_t rc = ECORE_SUCCESS;
3332 3334
3333 3335 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3334 3336 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3335 3337
3336 3338 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3337 3339 drv_mb_param, &rsp, num_images);
3338 3340
3339 3341 if (rc != ECORE_SUCCESS)
3340 3342 return rc;
3341 3343
3342 3344 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3343 3345 rc = ECORE_UNKNOWN_ERROR;
3344 3346
3345 3347 return rc;
3346 3348 }
3347 3349
3348 3350 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3349 3351 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3350 3352 struct bist_nvm_image_att *p_image_att, u32 image_index)
3351 3353 {
3352 3354 struct ecore_mcp_nvm_params params;
3353 3355 enum _ecore_status_t rc;
3354 3356 u32 buf_size;
3355 3357
3356 3358 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3357 3359 params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3358 3360 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3359 3361 params.nvm_common.offset |= (image_index <<
3360 3362 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
3361 3363
3362 3364 params.type = ECORE_MCP_NVM_RD;
3363 3365 params.nvm_rd.buf_size = &buf_size;
3364 3366 params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
3365 3367 params.nvm_rd.buf = (u32 *)p_image_att;
3366 3368
3367 3369 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3368 3370 if (rc != ECORE_SUCCESS)
3369 3371 return rc;
3370 3372
3371 3373 if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3372 3374 (p_image_att->return_code != 1))
3373 3375 rc = ECORE_UNKNOWN_ERROR;
3374 3376
3375 3377 return rc;
3376 3378 }
3377 3379
3378 3380 enum _ecore_status_t
3379 3381 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3380 3382 enum ecore_nvm_images image_id,
3381 3383 struct ecore_nvm_image_att *p_image_att)
3382 3384 {
3383 3385 struct bist_nvm_image_att mfw_image_att;
3384 3386 enum nvm_image_type type;
3385 3387 u32 num_images, i;
3386 3388 enum _ecore_status_t rc;
3387 3389
3388 3390 /* Translate image_id into MFW definitions */
3389 3391 switch (image_id) {
3390 3392 case ECORE_NVM_IMAGE_ISCSI_CFG:
3391 3393 type = NVM_TYPE_ISCSI_CFG;
3392 3394 break;
3393 3395 case ECORE_NVM_IMAGE_FCOE_CFG:
3394 3396 type = NVM_TYPE_FCOE_CFG;
3395 3397 break;
3396 3398 case ECORE_NVM_IMAGE_MDUMP:
3397 3399 type = NVM_TYPE_MDUMP;
3398 3400 break;
3399 3401 default:
3400 3402 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3401 3403 image_id);
3402 3404 return ECORE_INVAL;
3403 3405 }
3404 3406
3405 3407 /* Learn number of images, then traverse and see if one fits */
3406 3408 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3407 3409 if (rc != ECORE_SUCCESS || !num_images)
3408 3410 return ECORE_INVAL;
3409 3411
3410 3412 for (i = 0; i < num_images; i++) {
3411 3413 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3412 3414 &mfw_image_att, i);
3413 3415 if (rc != ECORE_SUCCESS)
3414 3416 return rc;
3415 3417
3416 3418 if (type == mfw_image_att.image_type)
3417 3419 break;
3418 3420 }
3419 3421 if (i == num_images) {
3420 3422 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3421 3423 "Failed to find nvram image of type %08x\n",
3422 3424 image_id);
3423 3425 return ECORE_INVAL;
3424 3426 }
3425 3427
3426 3428 p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3427 3429 p_image_att->length = mfw_image_att.len;
3428 3430
3429 3431 return ECORE_SUCCESS;
3430 3432 }
3431 3433
3432 3434 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3433 3435 struct ecore_ptt *p_ptt,
3434 3436 enum ecore_nvm_images image_id,
3435 3437 u8 *p_buffer, u32 buffer_len)
3436 3438 {
3437 3439 struct ecore_nvm_image_att image_att;
3438 3440 enum _ecore_status_t rc;
3439 3441
3440 3442 OSAL_MEM_ZERO(p_buffer, buffer_len);
3441 3443
3442 3444 rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3443 3445 if (rc != ECORE_SUCCESS)
3444 3446 return rc;
3445 3447
3446 3448 /* Validate sizes - both the image's and the supplied buffer's */
3447 3449 if (image_att.length <= 4) {
3448 3450 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3449 3451 "Image [%d] is too small - only %d bytes\n",
3450 3452 image_id, image_att.length);
3451 3453 return ECORE_INVAL;
3452 3454 }
3453 3455
3454 3456 /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
3455 3457 image_att.length -= 4;
3456 3458
3457 3459 if (image_att.length > buffer_len) {
3458 3460 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3459 3461 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3460 3462 image_id, image_att.length, buffer_len);
3461 3463 return ECORE_NOMEM;
3462 3464 }
3463 3465
3464 3466 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3465 3467 p_buffer, image_att.length);
3466 3468 }
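
The size checks above encode two rules: a stored image carries a 4-byte CRC suffix the upper layer never sees, so anything of length <= 4 has no payload, and the trimmed payload must fit the caller's buffer. A sketch of just that arithmetic, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when a CRC-suffixed image of image_len fits buf_len. */
static int image_fits(uint32_t image_len, uint32_t buf_len)
{
	if (image_len <= 4)		/* nothing left after the CRC */
		return (0);
	return ((image_len - 4) <= buf_len);
}

int main(void)
{
	(void) printf("%d %d %d\n", image_fits(4, 64),
	    image_fits(20, 8), image_fits(20, 64));	/* 0 0 1 */
	return (0);
}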
3467 3469
3468 3470 enum _ecore_status_t
3469 3471 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3470 3472 struct ecore_ptt *p_ptt,
3471 3473 struct ecore_temperature_info *p_temp_info)
3472 3474 {
3473 3475 struct ecore_temperature_sensor *p_temp_sensor;
3474 3476 struct temperature_status_stc mfw_temp_info;
3475 3477 struct ecore_mcp_mb_params mb_params;
3476 3478 u32 val;
3477 3479 enum _ecore_status_t rc;
3478 3480 u8 i;
3479 3481
3480 3482 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3481 3483 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3482 3484 mb_params.p_data_dst = &mfw_temp_info;
3483 3485 mb_params.data_dst_size = sizeof(mfw_temp_info);
3484 3486 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3485 3487 if (rc != ECORE_SUCCESS)
3486 3488 return rc;
3487 3489
3488 3490 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3489 3491 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3490 3492 ECORE_MAX_NUM_OF_SENSORS);
3491 3493 for (i = 0; i < p_temp_info->num_sensors; i++) {
3492 3494 val = mfw_temp_info.sensor[i];
3493 3495 p_temp_sensor = &p_temp_info->sensors[i];
3494 3496 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3495 3497 SENSOR_LOCATION_SHIFT;
3496 3498 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3497 3499 THRESHOLD_HIGH_SHIFT;
3498 3500 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3499 3501 CRITICAL_TEMPERATURE_SHIFT;
3500 3502 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3501 3503 CURRENT_TEMP_SHIFT;
3502 3504 }
3503 3505
3504 3506 return ECORE_SUCCESS;
3505 3507 }
3506 3508
3507 3509 enum _ecore_status_t ecore_mcp_get_mba_versions(
3508 3510 struct ecore_hwfn *p_hwfn,
3509 3511 struct ecore_ptt *p_ptt,
3510 3512 struct ecore_mba_vers *p_mba_vers)
3511 3513 {
3512 3514 struct ecore_mcp_nvm_params params;
3513 3515 enum _ecore_status_t rc;
3514 3516 u32 buf_size;
3515 3517
3516 3518 OSAL_MEM_ZERO(&params, sizeof(params));
3517 3519 params.type = ECORE_MCP_NVM_RD;
3518 3520 params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
3519 3521 params.nvm_common.offset = 0;
3520 3522 params.nvm_rd.buf = &(p_mba_vers->mba_vers[0]);
3521 3523 params.nvm_rd.buf_size = &buf_size;
3522 3524 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3523 3525
3524 3526 if (rc != ECORE_SUCCESS)
3525 3527 return rc;
3526 3528
3527 3529 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3528 3530 FW_MSG_CODE_NVM_OK)
3529 3531 rc = ECORE_UNKNOWN_ERROR;
3530 3532
3531 3533 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3532 3534 rc = ECORE_UNKNOWN_ERROR;
3533 3535
3534 3536 return rc;
3535 3537 }
3536 3538
3537 3539 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3538 3540 struct ecore_ptt *p_ptt,
3539 3541 u64 *num_events)
3540 3542 {
3541 3543 u32 rsp;
3542 3544
3543 3545 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3544 3546 0, &rsp, (u32 *)num_events);
3545 3547 }
3546 3548
3547 3549 static enum resource_id_enum
3548 3550 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3549 3551 {
3550 3552 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3551 3553
3552 3554 switch (res_id) {
3553 3555 case ECORE_SB:
3554 3556 mfw_res_id = RESOURCE_NUM_SB_E;
3555 3557 break;
3556 3558 case ECORE_L2_QUEUE:
3557 3559 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3558 3560 break;
3559 3561 case ECORE_VPORT:
3560 3562 mfw_res_id = RESOURCE_NUM_VPORT_E;
3561 3563 break;
3562 3564 case ECORE_RSS_ENG:
3563 3565 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3564 3566 break;
3565 3567 case ECORE_PQ:
3566 3568 mfw_res_id = RESOURCE_NUM_PQ_E;
3567 3569 break;
3568 3570 case ECORE_RL:
3569 3571 mfw_res_id = RESOURCE_NUM_RL_E;
3570 3572 break;
3571 3573 case ECORE_MAC:
3572 3574 case ECORE_VLAN:
3573 3575 /* Each VFC resource can accommodate both a MAC and a VLAN */
3574 3576 mfw_res_id = RESOURCE_VFC_FILTER_E;
3575 3577 break;
3576 3578 case ECORE_ILT:
3577 3579 mfw_res_id = RESOURCE_ILT_E;
3578 3580 break;
3579 3581 case ECORE_LL2_QUEUE:
3580 3582 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3581 3583 break;
3582 3584 case ECORE_RDMA_CNQ_RAM:
3583 3585 case ECORE_CMDQS_CQS:
3584 3586 /* CNQ/CMDQS are the same resource */
3585 3587 mfw_res_id = RESOURCE_CQS_E;
3586 3588 break;
3587 3589 case ECORE_RDMA_STATS_QUEUE:
3588 3590 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3589 3591 break;
3590 3592 case ECORE_BDQ:
3591 3593 mfw_res_id = RESOURCE_BDQ_E;
3592 3594 break;
3593 3595 default:
3594 3596 break;
3595 3597 }
3596 3598
3597 3599 return mfw_res_id;
3598 3600 }
3599 3601
3600 3602 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
3601 3603 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
3602 3604 #define ECORE_RESC_ALLOC_VERSION \
3603 3605 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
3604 3606 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3605 3607 (ECORE_RESC_ALLOC_VERSION_MINOR << \
3606 3608 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3607 3609
3608 3610 struct ecore_resc_alloc_in_params {
3609 3611 u32 cmd;
3610 3612 enum ecore_resources res_id;
3611 3613 u32 resc_max_val;
3612 3614 };
3613 3615
3614 3616 struct ecore_resc_alloc_out_params {
3615 3617 u32 mcp_resp;
3616 3618 u32 mcp_param;
3617 3619 u32 resc_num;
3618 3620 u32 resc_start;
3619 3621 u32 vf_resc_num;
3620 3622 u32 vf_resc_start;
3621 3623 u32 flags;
3622 3624 };
3623 3625
3624 3626 static enum _ecore_status_t
3625 3627 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3626 3628 struct ecore_ptt *p_ptt,
3627 3629 struct ecore_resc_alloc_in_params *p_in_params,
3628 3630 struct ecore_resc_alloc_out_params *p_out_params)
3629 3631 {
3630 3632 struct ecore_mcp_mb_params mb_params;
3631 3633 struct resource_info mfw_resc_info;
3632 3634 enum _ecore_status_t rc;
3633 3635
3634 3636 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3635 3637
3636 3638 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3637 3639 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3638 3640 DP_ERR(p_hwfn,
3639 3641 "Failed to match resource %d [%s] with the MFW resources\n",
3640 3642 p_in_params->res_id,
3641 3643 ecore_hw_get_resc_name(p_in_params->res_id));
3642 3644 return ECORE_INVAL;
3643 3645 }
3644 3646
3645 3647 switch (p_in_params->cmd) {
3646 3648 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3647 3649 mfw_resc_info.size = p_in_params->resc_max_val;
3648 3650 /* Fallthrough */
3649 3651 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3650 3652 break;
3651 3653 default:
3652 3654 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3653 3655 p_in_params->cmd);
3654 3656 return ECORE_INVAL;
3655 3657 }
3656 3658
3657 3659 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3658 3660 mb_params.cmd = p_in_params->cmd;
3659 3661 mb_params.param = ECORE_RESC_ALLOC_VERSION;
3660 3662 mb_params.p_data_src = &mfw_resc_info;
3661 3663 mb_params.data_src_size = sizeof(mfw_resc_info);
3662 3664 mb_params.p_data_dst = mb_params.p_data_src;
3663 3665 mb_params.data_dst_size = mb_params.data_src_size;
3664 3666
3665 3667 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3666 3668 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3667 3669 p_in_params->cmd, p_in_params->res_id,
3668 3670 ecore_hw_get_resc_name(p_in_params->res_id),
3669 3671 ECORE_MFW_GET_FIELD(mb_params.param,
3670 3672 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3671 3673 ECORE_MFW_GET_FIELD(mb_params.param,
3672 3674 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3673 3675 p_in_params->resc_max_val);
3674 3676
3675 3677 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3676 3678 if (rc != ECORE_SUCCESS)
3677 3679 return rc;
3678 3680
3679 3681 p_out_params->mcp_resp = mb_params.mcp_resp;
3680 3682 p_out_params->mcp_param = mb_params.mcp_param;
3681 3683 p_out_params->resc_num = mfw_resc_info.size;
3682 3684 p_out_params->resc_start = mfw_resc_info.offset;
3683 3685 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3684 3686 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3685 3687 p_out_params->flags = mfw_resc_info.flags;
3686 3688
3687 3689 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3688 3690 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3689 3691 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3690 3692 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3691 3693 ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3692 3694 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3693 3695 p_out_params->resc_num, p_out_params->resc_start,
3694 3696 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3695 3697 p_out_params->flags);
3696 3698
3697 3699 return ECORE_SUCCESS;
3698 3700 }
3699 3701
3700 3702 enum _ecore_status_t
3701 3703 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3702 3704 enum ecore_resources res_id, u32 resc_max_val,
3703 3705 u32 *p_mcp_resp)
3704 3706 {
3705 3707 struct ecore_resc_alloc_out_params out_params;
3706 3708 struct ecore_resc_alloc_in_params in_params;
3707 3709 enum _ecore_status_t rc;
3708 3710
3709 3711 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3710 3712 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3711 3713 in_params.res_id = res_id;
3712 3714 in_params.resc_max_val = resc_max_val;
3713 3715 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3714 3716 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3715 3717 &out_params);
3716 3718 if (rc != ECORE_SUCCESS)
3717 3719 return rc;
3718 3720
3719 3721 *p_mcp_resp = out_params.mcp_resp;
3720 3722
3721 3723 return ECORE_SUCCESS;
3722 3724 }
3723 3725
3724 3726 enum _ecore_status_t
3725 3727 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3726 3728 enum ecore_resources res_id, u32 *p_mcp_resp,
3727 3729 u32 *p_resc_num, u32 *p_resc_start)
3728 3730 {
3729 3731 struct ecore_resc_alloc_out_params out_params;
3730 3732 struct ecore_resc_alloc_in_params in_params;
3731 3733 enum _ecore_status_t rc;
3732 3734
3733 3735 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3734 3736 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3735 3737 in_params.res_id = res_id;
3736 3738 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3737 3739 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3738 3740 &out_params);
3739 3741 if (rc != ECORE_SUCCESS)
3740 3742 return rc;
3741 3743
3742 3744 *p_mcp_resp = out_params.mcp_resp;
3743 3745
3744 3746 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3745 3747 *p_resc_num = out_params.resc_num;
3746 3748 *p_resc_start = out_params.resc_start;
3747 3749 }
3748 3750
3749 3751 return ECORE_SUCCESS;
3750 3752 }
3751 3753
3752 3754 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3753 3755 struct ecore_ptt *p_ptt)
3754 3756 {
3755 3757 u32 mcp_resp, mcp_param;
3756 3758
3757 3759 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3758 3760 &mcp_resp, &mcp_param);
3759 3761 }
3760 3762
3761 3763 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3762 3764 struct ecore_ptt *p_ptt,
3763 3765 u8 lldp_mac_addr[ETH_ALEN])
3764 3766 {
3765 3767 struct ecore_mcp_mb_params mb_params;
3766 3768 struct mcp_mac lldp_mac;
3767 3769 enum _ecore_status_t rc;
3768 3770
3769 3771 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3770 3772 mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3771 3773 mb_params.p_data_dst = &lldp_mac;
3772 3774 mb_params.data_dst_size = sizeof(lldp_mac);
3773 3775 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3774 3776 if (rc != ECORE_SUCCESS)
3775 3777 return rc;
3776 3778
3777 3779 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3778 3780 DP_NOTICE(p_hwfn, false,
3779 3781 "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3780 3782 mb_params.mcp_resp);
3781 3783 return ECORE_INVAL;
3782 3784 }
3783 3785
3784 3786 *(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3785 3787 *(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3786 3788
3787 3789 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3788 3790 "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3789 3791 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3790 3792 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3791 3793
3792 3794 return ECORE_SUCCESS;
3793 3795 }
3794 3796
3795 3797 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3796 3798 struct ecore_ptt *p_ptt,
3797 3799 u8 lldp_mac_addr[ETH_ALEN])
3798 3800 {
3799 3801 struct ecore_mcp_mb_params mb_params;
3800 3802 struct mcp_mac lldp_mac;
3801 3803 enum _ecore_status_t rc;
3802 3804
3803 3805 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3804 3806 "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3805 3807 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3806 3808 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3807 3809
3808 3810 OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3809 3811 lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3810 3812 lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3811 3813
3812 3814 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3813 3815 mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3814 3816 mb_params.p_data_src = &lldp_mac;
3815 3817 mb_params.data_src_size = sizeof(lldp_mac);
3816 3818 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3817 3819 if (rc != ECORE_SUCCESS)
3818 3820 return rc;
3819 3821
3820 3822 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3821 3823 DP_NOTICE(p_hwfn, false,
3822 3824 "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3823 3825 mb_params.mcp_resp);
3824 3826 return ECORE_INVAL;
3825 3827 }
3826 3828
3827 3829 return ECORE_SUCCESS;
3828 3830 }
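
Both LLDP helpers split the 6-byte MAC across the two words of struct mcp_mac: the first two bytes map to mac_upper and the remaining four to mac_lower, via the pointer casts above. A sketch of the same byte split done with memcpy (the u16/u32 split mirrors how the driver uses the struct; any layout detail beyond that is an assumption here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x01, 0x02, 0x03 };
	uint16_t upper;
	uint32_t lower;

	/* Bytes 0-1 into the "upper" word, bytes 2-5 into the "lower". */
	(void) memcpy(&upper, mac, sizeof (upper));
	(void) memcpy(&lower, mac + 2, sizeof (lower));
	(void) printf("upper=0x%04x lower=0x%08x\n",
	    (unsigned)upper, (unsigned)lower);
	return (0);
}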
3829 3831
3830 3832 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3831 3833 struct ecore_ptt *p_ptt,
3832 3834 u32 param, u32 *p_mcp_resp,
3833 3835 u32 *p_mcp_param)
3834 3836 {
3835 3837 enum _ecore_status_t rc;
3836 3838
3837 3839 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3838 3840 p_mcp_resp, p_mcp_param);
3839 3841 if (rc != ECORE_SUCCESS)
3840 3842 return rc;
3841 3843
3842 3844 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3843 3845 DP_INFO(p_hwfn,
3844 3846 "The resource command is unsupported by the MFW\n");
3845 3847 return ECORE_NOTIMPL;
3846 3848 }
3847 3849
3848 3850 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3849 3851 u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3850 3852
3851 3853 DP_NOTICE(p_hwfn, false,
3852 3854 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3853 3855 param, opcode);
3854 3856 return ECORE_INVAL;
3855 3857 }
3856 3858
3857 3859 return rc;
3858 3860 }
3859 3861
3860 3862 enum _ecore_status_t
3861 3863 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3862 3864 struct ecore_resc_lock_params *p_params)
3863 3865 {
3864 3866 u32 param = 0, mcp_resp, mcp_param;
3865 3867 u8 opcode;
3866 3868 enum _ecore_status_t rc;
3867 3869
3868 3870 switch (p_params->timeout) {
3869 3871 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3870 3872 opcode = RESOURCE_OPCODE_REQ;
3871 3873 p_params->timeout = 0;
3872 3874 break;
3873 3875 case ECORE_MCP_RESC_LOCK_TO_NONE:
3874 3876 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3875 3877 p_params->timeout = 0;
3876 3878 break;
3877 3879 default:
3878 3880 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3879 3881 break;
3880 3882 }
3881 3883
3882 3884 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3883 3885 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3884 3886 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3885 3887
3886 3888 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3887 3889 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3888 3890 param, p_params->timeout, opcode, p_params->resource);
3889 3891
3890 3892 /* Attempt to acquire the resource */
3891 3893 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3892 3894 &mcp_param);
3893 3895 if (rc != ECORE_SUCCESS)
3894 3896 return rc;
3895 3897
3896 3898 /* Analyze the response */
3897 3899 p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3898 3900 RESOURCE_CMD_RSP_OWNER);
3899 3901 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3900 3902
3901 3903 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3902 3904 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3903 3905 mcp_param, opcode, p_params->owner);
3904 3906
3905 3907 switch (opcode) {
3906 3908 case RESOURCE_OPCODE_GNT:
3907 3909 p_params->b_granted = true;
3908 3910 break;
3909 3911 case RESOURCE_OPCODE_BUSY:
3910 3912 p_params->b_granted = false;
3911 3913 break;
3912 3914 default:
3913 3915 DP_NOTICE(p_hwfn, false,
3914 3916 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3915 3917 mcp_param, opcode);
3916 3918 return ECORE_INVAL;
3917 3919 }
3918 3920
3919 3921 return ECORE_SUCCESS;
3920 3922 }
3921 3923
3922 3924 enum _ecore_status_t
3923 3925 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3924 3926 struct ecore_resc_lock_params *p_params)
3925 3927 {
3926 3928 u32 retry_cnt = 0;
3927 3929 enum _ecore_status_t rc;
3928 3930
3929 3931 do {
3930 3932 /* No need for an interval before the first iteration */
3931 3933 if (retry_cnt) {
3932 3934 if (p_params->sleep_b4_retry) {
3933 3935 u16 retry_interval_in_ms =
3934 3936 DIV_ROUND_UP(p_params->retry_interval,
3935 3937 1000);
3936 3938
3937 3939 OSAL_MSLEEP(retry_interval_in_ms);
3938 3940 } else {
3939 3941 OSAL_UDELAY(p_params->retry_interval);
3940 3942 }
3941 3943 }
3942 3944
3943 3945 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3944 3946 if (rc != ECORE_SUCCESS)
3945 3947 return rc;
3946 3948
3947 3949 if (p_params->b_granted)
3948 3950 break;
3949 3951 } while (retry_cnt++ < p_params->retry_num);
3950 3952
3951 3953 return ECORE_SUCCESS;
3952 3954 }
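
ecore_mcp_resc_lock() retries the acquisition with no delay before the first attempt, waits retry_interval between attempts (sleeping, with the microsecond interval rounded up to milliseconds, or busy-waiting depending on sleep_b4_retry), and gives up after retry_num tries while still returning ECORE_SUCCESS; callers distinguish the outcome via b_granted. A generic sketch of that shape (try_acquire() and the parameters are illustrative, not driver API; the grant status is returned directly here for brevity):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool try_acquire(void)
{
	static int calls;

	return (++calls >= 3);	/* pretend the lock frees up on try 3 */
}

static bool acquire_with_retries(unsigned retry_num, unsigned interval_us)
{
	unsigned retry_cnt = 0;

	do {
		/* No interval before the first iteration. */
		if (retry_cnt)
			(void) usleep(interval_us);
		if (try_acquire())
			return (true);
	} while (retry_cnt++ < retry_num);

	return (false);
}

int main(void)
{
	(void) printf("granted=%d\n", acquire_with_retries(5, 1000));
	return (0);
}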
3953 3955
3954 3956 enum _ecore_status_t
3955 3957 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3956 3958 struct ecore_resc_unlock_params *p_params)
3957 3959 {
3958 3960 u32 param = 0, mcp_resp, mcp_param;
3959 3961 u8 opcode;
3960 3962 enum _ecore_status_t rc;
3961 3963
3962 3964 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3963 3965 : RESOURCE_OPCODE_RELEASE;
3964 3966 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3965 3967 ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3966 3968
3967 3969 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3968 3970 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3969 3971 param, opcode, p_params->resource);
3970 3972
3971 3973 /* Attempt to release the resource */
3972 3974 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3973 3975 &mcp_param);
3974 3976 if (rc != ECORE_SUCCESS)
3975 3977 return rc;
3976 3978
3977 3979 /* Analyze the response */
3978 3980 opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3979 3981
3980 3982 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3981 3983 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3982 3984 mcp_param, opcode);
3983 3985
3984 3986 switch (opcode) {
3985 3987 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3986 3988 DP_INFO(p_hwfn,
3987 3989 "Resource unlock request for an already released resource [%d]\n",
3988 3990 p_params->resource);
3989 3991 /* Fallthrough */
3990 3992 case RESOURCE_OPCODE_RELEASED:
3991 3993 p_params->b_released = true;
3992 3994 break;
3993 3995 case RESOURCE_OPCODE_WRONG_OWNER:
3994 3996 p_params->b_released = false;
3995 3997 break;
3996 3998 default:
3997 3999 DP_NOTICE(p_hwfn, false,
3998 4000 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3999 4001 mcp_param, opcode);
4000 4002 return ECORE_INVAL;
4001 4003 }
4002 4004
4003 4005 return ECORE_SUCCESS;
4004 4006 }
4005 4007
4006 4008 enum _ecore_status_t
4007 4009 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4008 4010 u16 vlan)
4009 4011 {
4010 4012 u32 resp = 0, param = 0;
4011 4013 enum _ecore_status_t rc;
4012 4014
4013 4015 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4014 4016 (u32)vlan << DRV_MB_PARAM_FCOE_CVID_SHIFT,
4015 4017 &resp, ¶m);
4016 4018 if (rc != ECORE_SUCCESS)
4017 4019 DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);
4018 4020
4019 4021 return rc;
4020 4022 }
4021 4023
4022 4024 enum _ecore_status_t
4023 4025 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4024 4026 struct ecore_ptt *p_ptt, u8 *wwn)
4025 4027 {
4026 4028 struct ecore_mcp_mb_params mb_params;
4027 4029 struct mcp_wwn fabric_name;
4028 4030 enum _ecore_status_t rc;
4029 4031
4030 4032 OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4031 4033 fabric_name.wwn_upper = *(u32 *)wwn;
4032 4034 fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4033 4035
4034 4036 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4035 4037 mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4036 4038 mb_params.p_data_src = &fabric_name;
4037 4039 mb_params.data_src_size = sizeof(fabric_name);
4038 4040 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4039 4041 if (rc != ECORE_SUCCESS)
4040 4042 DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);
4041 4043
4042 4044 return rc;
4043 4045 }
4044 4046
4045 4047 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4046 4048 u32 offset, u32 val)
4047 4049 {
4048 4050 struct ecore_mcp_mb_params mb_params = {0};
4049 4051 enum _ecore_status_t rc = ECORE_SUCCESS;
4050 4052 u32 dword = val;
4051 4053
4052 4054 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4053 4055 mb_params.param = offset;
4054 4056 mb_params.p_data_src = &dword;
4055 4057 mb_params.data_src_size = sizeof(dword);
4056 4058
4057 4059 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4058 4060 if (rc != ECORE_SUCCESS) {
4059 4061 DP_NOTICE(p_hwfn, false,
4060 4062 "Failed to send WOL write request, rc = %d\n", rc);
4061 4063 }
4062 4064
4063 4065 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4064 4066 DP_NOTICE(p_hwfn, false,
4065 4067 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4066 4068 val, offset, mb_params.mcp_resp);
4067 4069 rc = ECORE_UNKNOWN_ERROR;
4068 4070 }
4069 4071 }
4070 4072
4071 4073 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4072 4074 struct ecore_ptt *p_ptt)
4073 4075 {
4074 4076 u32 mcp_resp;
4075 4077 enum _ecore_status_t rc;
4076 4078
4077 4079 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4078 4080 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4079 4081 if (rc == ECORE_SUCCESS)
4080 4082 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4081 4083 "MFW supported features: %08x\n",
4082 4084 p_hwfn->mcp_info->capabilities);
4083 4085
4084 4086 return rc;
4085 4087 }
4086 4088
4087 4089 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4088 4090 struct ecore_ptt *p_ptt)
4089 4091 {
4090 4092 u32 mcp_resp, mcp_param, features;
4091 4093
4092 4094 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4093 4095 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4094 4096
4095 4097 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4096 4098 features, &mcp_resp, &mcp_param);
4097 4099 }