Print this page
5083 avoid undefined order of operations in assignments
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/hxge/hxge_virtual.c
+++ new/usr/src/uts/common/io/hxge/hxge_virtual.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2012 Milan Jurik. All rights reserved.
25 25 */
26 26
27 27 #include <hxge_impl.h>
28 28 #include <hxge_vmac.h>
29 29 #include <hxge_pfc.h>
30 30 #include <hpi_pfc.h>
31 31
32 32 static hxge_status_t hxge_get_mac_addr_properties(p_hxge_t);
33 33 static void hxge_use_cfg_hydra_properties(p_hxge_t);
34 34 static void hxge_use_cfg_dma_config(p_hxge_t);
35 35 static void hxge_use_cfg_class_config(p_hxge_t);
36 36 static void hxge_set_hw_dma_config(p_hxge_t);
37 37 static void hxge_set_hw_class_config(p_hxge_t);
38 38 static void hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
39 39 uint8_t endldg, int *ngrps);
40 40
41 41 extern uint16_t hxge_rcr_timeout;
42 42 extern uint16_t hxge_rcr_threshold;
43 43
44 44 extern uint32_t hxge_rbr_size;
45 45 extern uint32_t hxge_rcr_size;
46 46
47 47 extern uint_t hxge_rx_intr(caddr_t, caddr_t);
48 48 extern uint_t hxge_tx_intr(caddr_t, caddr_t);
49 49 extern uint_t hxge_vmac_intr(caddr_t, caddr_t);
50 50 extern uint_t hxge_syserr_intr(caddr_t, caddr_t);
51 51 extern uint_t hxge_pfc_intr(caddr_t, caddr_t);
52 52
53 53 /*
54 54 * Entry point to populate configuration parameters into the master hxge
55 55 * data structure and to update the NDD parameter list.
56 56 */
57 57 hxge_status_t
58 58 hxge_get_config_properties(p_hxge_t hxgep)
59 59 {
60 60 hxge_status_t status = HXGE_OK;
61 61
62 62 HXGE_DEBUG_MSG((hxgep, VPD_CTL, " ==> hxge_get_config_properties"));
63 63
64 64 if (hxgep->hxge_hw_p == NULL) {
65 65 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
66 66 " hxge_get_config_properties: common hardware not set"));
67 67 return (HXGE_ERROR);
68 68 }
69 69
70 70 hxgep->classifier.tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
71 71
72 72 status = hxge_get_mac_addr_properties(hxgep);
73 73 if (status != HXGE_OK) {
74 74 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
75 75 " hxge_get_config_properties: mac addr properties failed"));
76 76 return (status);
77 77 }
78 78
79 79 HXGE_DEBUG_MSG((hxgep, VPD_CTL,
80 80 " ==> hxge_get_config_properties: Hydra"));
81 81
82 82 hxge_use_cfg_hydra_properties(hxgep);
83 83
84 84 HXGE_DEBUG_MSG((hxgep, VPD_CTL, " <== hxge_get_config_properties"));
85 85 return (HXGE_OK);
86 86 }
87 87
88 88
/*
 * Validate the "vlan-ids" property and populate the per-port software
 * VLAN table.  Each configured VLAN id is flagged in the class config's
 * vlan_tbl[]; duplicates and out-of-range ids are filtered, and if any
 * entries were rejected the soft property is rewritten with only the
 * good values.
 */
static void
hxge_set_hw_vlan_class_config(p_hxge_t hxgep)
{
	int i;
	p_hxge_param_t param_arr;
	uint_t vlan_cnt;
	int *vlan_cfg_val;
	hxge_param_map_t *vmap;
	char *prop;
	p_hxge_class_pt_cfg_t p_class_cfgp;
	uint32_t good_cfg[32];	/* filtered property values */
	int good_count = 0;
	hxge_mv_cfg_t *vlan_tbl;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_vlan_config"));
	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

	param_arr = hxgep->param_arr;
	prop = param_arr[param_vlan_ids].fcode_name;

	/*
	 * uint32_t array, each array entry specifying a VLAN id
	 */
	for (i = 0; i <= VLAN_ID_MAX; i++) {
		p_class_cfgp->vlan_tbl[i].flag = 0;
	}

	vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
	/* No property configured: leave the table cleared. */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &vlan_cfg_val, &vlan_cnt) != DDI_PROP_SUCCESS) {
		return;
	}

	for (i = 0; i < vlan_cnt; i++) {
		vmap = (hxge_param_map_t *)&vlan_cfg_val[i];
		/* Accept only ids in (0, VLAN_ID_MAX]. */
		if ((vmap->param_id) && (vmap->param_id <= VLAN_ID_MAX)) {
			HXGE_DEBUG_MSG((hxgep, CFG2_CTL,
			    " hxge_vlan_config vlan id %d", vmap->param_id));

			/*
			 * good_count only advances for ids not seen before,
			 * so duplicates are overwritten in place.
			 * NOTE(review): good_cfg holds 32 entries; presumably
			 * the property never carries more than 32 unique
			 * VLAN ids -- confirm upstream bound.
			 */
			good_cfg[good_count] = vlan_cfg_val[i];
			if (vlan_tbl[vmap->param_id].flag == 0)
				good_count++;

			vlan_tbl[vmap->param_id].flag = 1;
		}
	}

	ddi_prop_free(vlan_cfg_val);
	/* Rewrite the property if any entries were dropped as bad/dup. */
	if (good_count != vlan_cnt) {
		(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
		    hxgep->dip, prop, (int *)good_cfg, good_count);
	}

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_vlan_config"));
}
144 144
145 145
146 146 /*
147 147 * Read param_vlan_ids and param_implicit_vlan_id properties from either
148 148 * hxge.conf or OBP. Update the soft properties. Populate these
149 149 * properties into the hxge data structure.
150 150 */
151 151 static void
152 152 hxge_use_cfg_vlan_class_config(p_hxge_t hxgep)
153 153 {
154 154 uint_t vlan_cnt;
155 155 int *vlan_cfg_val;
156 156 int status;
157 157 p_hxge_param_t param_arr;
158 158 char *prop;
159 159 uint32_t implicit_vlan_id = 0;
160 160 int *int_prop_val;
161 161 uint_t prop_len;
162 162 p_hxge_param_t pa;
163 163
164 164 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_vlan_config"));
165 165 param_arr = hxgep->param_arr;
166 166 prop = param_arr[param_vlan_ids].fcode_name;
167 167
168 168 status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
169 169 &vlan_cfg_val, &vlan_cnt);
170 170 if (status == DDI_PROP_SUCCESS) {
171 171 status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
172 172 hxgep->dip, prop, vlan_cfg_val, vlan_cnt);
173 173 ddi_prop_free(vlan_cfg_val);
174 174 }
175 175
176 176 pa = ¶m_arr[param_implicit_vlan_id];
177 177 prop = pa->fcode_name;
178 178 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
179 179 &int_prop_val, &prop_len) == DDI_PROP_SUCCESS) {
180 180 implicit_vlan_id = (uint32_t)*int_prop_val;
181 181 if ((implicit_vlan_id >= pa->minimum) ||
182 182 (implicit_vlan_id <= pa->maximum)) {
183 183 status = ddi_prop_update_int(DDI_DEV_T_NONE, hxgep->dip,
184 184 prop, (int)implicit_vlan_id);
185 185 }
186 186 ddi_prop_free(int_prop_val);
187 187 }
188 188
189 189 hxge_set_hw_vlan_class_config(hxgep);
190 190
191 191 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_vlan_config"));
192 192 }
193 193
194 194 /*
195 195 * Read in the configuration parameters from either hxge.conf or OBP and
196 196 * populate the master data structure hxge.
197 197 * Use these parameters to update the soft properties and the ndd array.
198 198 */
199 199 static void
200 200 hxge_use_cfg_hydra_properties(p_hxge_t hxgep)
201 201 {
202 202 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_hydra_properties"));
203 203
204 204 (void) hxge_use_cfg_dma_config(hxgep);
205 205 (void) hxge_use_cfg_vlan_class_config(hxgep);
206 206 (void) hxge_use_cfg_class_config(hxgep);
207 207
208 208 /*
209 209 * Read in the hardware (fcode) properties and use these properties
210 210 * to update the ndd array.
211 211 */
212 212 (void) hxge_get_param_soft_properties(hxgep);
213 213 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_hydra_properties"));
214 214 }
215 215
216 216
217 217 /*
218 218 * Read param_accept_jumbo, param_rxdma_intr_time, and param_rxdma_intr_pkts
219 219 * from either hxge.conf or OBP.
220 220 * Update the soft properties.
221 221 * Populate these properties into the hxge data structure for latter use.
222 222 */
223 223 static void
224 224 hxge_use_cfg_dma_config(p_hxge_t hxgep)
225 225 {
226 226 int tx_ndmas, rx_ndmas;
227 227 p_hxge_dma_pt_cfg_t p_dma_cfgp;
228 228 p_hxge_hw_pt_cfg_t p_cfgp;
229 229 dev_info_t *dip;
230 230 p_hxge_param_t param_arr;
231 231 char *prop;
232 232 int *prop_val;
233 233 uint_t prop_len;
234 234
235 235 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_dma_config"));
236 236 param_arr = hxgep->param_arr;
237 237
238 238 p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
239 239 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
240 240 dip = hxgep->dip;
241 241
242 242 tx_ndmas = 4;
243 243 p_cfgp->start_tdc = 0;
244 244 p_cfgp->max_tdcs = hxgep->max_tdcs = tx_ndmas;
245 245 hxgep->tdc_mask = (tx_ndmas - 1);
246 246 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
247 247 "p_cfgp 0x%llx max_tdcs %d hxgep->max_tdcs %d",
248 248 p_cfgp, p_cfgp->max_tdcs, hxgep->max_tdcs));
249 249
250 250 rx_ndmas = 4;
251 251 p_cfgp->start_rdc = 0;
252 252 p_cfgp->max_rdcs = hxgep->max_rdcs = rx_ndmas;
253 253
254 254 p_cfgp->start_ldg = 0;
255 255 p_cfgp->max_ldgs = HXGE_INT_MAX_LDG;
256 256
257 257 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_default_dma_config: "
258 258 "p_cfgp 0x%llx max_rdcs %d hxgep->max_rdcs %d",
259 259 p_cfgp, p_cfgp->max_rdcs, hxgep->max_rdcs));
260 260
261 261 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
262 262 "p_cfgp 0x%016llx start_ldg %d hxgep->max_ldgs %d ",
263 263 p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));
264 264
265 265 /*
266 266 * add code for individual rdc properties
267 267 */
268 268 prop = param_arr[param_accept_jumbo].fcode_name;
269 269
270 270 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
271 271 &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
272 272 if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
273 273 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
274 274 hxgep->dip, prop, prop_val, prop_len);
275 275 }
276 276 ddi_prop_free(prop_val);
277 277 }
278 278
279 279 prop = param_arr[param_rxdma_intr_time].fcode_name;
280 280
281 281 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
282 282 &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
283 283 if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
284 284 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
285 285 hxgep->dip, prop, prop_val, prop_len);
286 286 }
287 287 ddi_prop_free(prop_val);
288 288 }
289 289
290 290 prop = param_arr[param_rxdma_intr_pkts].fcode_name;
291 291
292 292 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
293 293 &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
294 294 if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
295 295 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
296 296 hxgep->dip, prop, prop_val, prop_len);
297 297 }
298 298 ddi_prop_free(prop_val);
299 299 }
300 300
301 301 hxge_set_hw_dma_config(hxgep);
302 302 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_use_cfg_dma_config"));
303 303 }
304 304
/*
 * Thin wrapper: the L2/L3 class configuration is derived entirely from
 * the parameter array / .conf properties by hxge_set_hw_class_config().
 */
static void
hxge_use_cfg_class_config(p_hxge_t hxgep)
{
	hxge_set_hw_class_config(hxgep);
}
310 310
311 311 static void
312 312 hxge_set_hw_dma_config(p_hxge_t hxgep)
313 313 {
314 314 p_hxge_dma_pt_cfg_t p_dma_cfgp;
315 315 p_hxge_hw_pt_cfg_t p_cfgp;
316 316
317 317 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_set_hw_dma_config"));
318 318
319 319 p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
320 320 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
321 321
322 322 /* Transmit DMA Channels */
323 323 hxgep->ntdc = p_cfgp->max_tdcs;
324 324
325 325 /* Receive DMA Channels */
326 326 hxgep->nrdc = p_cfgp->max_rdcs;
327 327
328 328 p_dma_cfgp->rbr_size = hxge_rbr_size;
329 329 if (hxge_rcr_size > HXGE_RCR_MAX)
330 330 hxge_rcr_size = HXGE_RCR_MAX;
331 331 p_dma_cfgp->rcr_size = hxge_rcr_size;
332 332
333 333 HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_dma_config"));
334 334 }
335 335
336 336
337 337 boolean_t
338 338 hxge_check_rxdma_port_member(p_hxge_t hxgep, uint8_t rdc)
339 339 {
340 340 p_hxge_dma_pt_cfg_t p_dma_cfgp;
341 341 p_hxge_hw_pt_cfg_t p_cfgp;
342 342 int status = B_TRUE;
343 343
344 344 HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_rxdma_port_member"));
345 345
346 346 p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
347 347 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
348 348
349 349 /* Receive DMA Channels */
350 350 if (rdc < p_cfgp->max_rdcs)
351 351 status = B_TRUE;
352 352 HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_rxdma_port_member"));
353 353
354 354 return (status);
355 355 }
356 356
/*
 * Return B_TRUE iff the given transmit DMA channel number is valid for
 * this port (i.e. below the configured TDC count).
 */
boolean_t
hxge_check_txdma_port_member(p_hxge_t hxgep, uint8_t tdc)
{
	p_hxge_dma_pt_cfg_t p_dma_cfgp;
	p_hxge_hw_pt_cfg_t p_cfgp;
	int status = B_FALSE;

	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_txdma_port_member"));

	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* Transmit DMA Channels (comment previously said "Receive") */
	if (tdc < p_cfgp->max_tdcs)
		status = B_TRUE;
	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_txdma_port_member"));

	return (status);
}
376 376
377 377
378 378 /*
379 379 * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
380 380 * or OBP. Populate these properties into the hxge data structure for latter
381 381 * use. Note that we are not updating these soft properties.
382 382 */
/*
 * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
 * or OBP and populate them into the hxge class configuration for later
 * use.  Each entry falls back to the NDD parameter array default when no
 * property is present; soft properties are NOT updated here.
 */
static void
hxge_set_hw_class_config(p_hxge_t hxgep)
{
	int i, j;
	p_hxge_param_t param_arr;
	int *int_prop_val;
	uint32_t cfg_value;
	char *prop;
	p_hxge_class_pt_cfg_t p_class_cfgp;
	int start_prop, end_prop;
	uint_t prop_cnt;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_class_config"));

	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

	param_arr = hxgep->param_arr;

	/*
	 * L2 class configuration. User configurable ether types
	 * (param_class_cfg_ether_usr1 .. usr2 map onto
	 * TCAM_CLASS_ETYPE_1 onward).
	 */
	start_prop = param_class_cfg_ether_usr1;
	end_prop = param_class_cfg_ether_usr2;

	for (i = start_prop; i <= end_prop; i++) {
		prop = param_arr[i].fcode_name;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
			/* Only the first array element is consumed. */
			cfg_value = (uint32_t)*int_prop_val;
			ddi_prop_free(int_prop_val);
		} else {
			/* No property: fall back to the NDD default. */
			cfg_value = (uint32_t)param_arr[i].value;
		}

		j = (i - start_prop) + TCAM_CLASS_ETYPE_1;
		p_class_cfgp->class_cfg[j] = cfg_value;
	}

	/*
	 * Use properties from either .conf or the NDD param array. Only bits
	 * 2 and 3 are significant
	 */
	start_prop = param_class_opt_ipv4_tcp;
	end_prop = param_class_opt_ipv6_sctp;

	for (i = start_prop; i <= end_prop; i++) {
		prop = param_arr[i].fcode_name;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
			cfg_value = (uint32_t)*int_prop_val;
			ddi_prop_free(int_prop_val);
		} else {
			cfg_value = (uint32_t)param_arr[i].value;
		}

		/* L3 options start at the TCP/IPv4 class slot. */
		j = (i - start_prop) + TCAM_CLASS_TCP_IPV4;
		p_class_cfgp->class_cfg[j] = cfg_value;
	}

	/* Initial hash seed, same property-or-default pattern. */
	prop = param_arr[param_hash_init_value].fcode_name;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
	    &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
		cfg_value = (uint32_t)*int_prop_val;
		ddi_prop_free(int_prop_val);
	} else {
		cfg_value = (uint32_t)param_arr[param_hash_init_value].value;
	}

	p_class_cfgp->init_hash = (uint32_t)cfg_value;

	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_class_config"));
}
456 456
457 457
458 458 /*
459 459 * Interrupts related interface functions.
460 460 */
/*
 * Interrupts related interface functions.
 *
 * Build the logical-device / logical-group tables: one logical device
 * (LDV) per RX channel, TX channel, VMAC, PFC, and system error source,
 * distributed over at most *navail_p logical groups (LDGs).  On return
 * *nrequired_p holds the number of groups actually used.
 */
hxge_status_t
hxge_ldgv_init(p_hxge_t hxgep, int *navail_p, int *nrequired_p)
{
	uint8_t ldv, i, maxldvs, maxldgs, start, end, nldvs;
	int ldg, endldg, ngrps;
	uint8_t channel;
	p_hxge_dma_pt_cfg_t p_dma_cfgp;
	p_hxge_hw_pt_cfg_t p_cfgp;
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldg_t ldgp, ptr;
	p_hxge_ldv_t ldvp;
	hxge_status_t status = HXGE_OK;
	peu_intr_mask_t parity_err_mask;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_init"));
	if (!*navail_p) {
		*nrequired_p = 0;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_ldgv_init:no avail"));
		return (HXGE_ERROR);
	}
	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* each DMA channels */
	nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;

	/* vmac */
	nldvs++;

	/* pfc */
	nldvs++;

	/* system error interrupts. */
	nldvs++;

	maxldvs = nldvs;
	maxldgs = p_cfgp->max_ldgs;

	if (!maxldvs || !maxldgs) {
		/* No devices configured. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_ldgv_init: "
		    "no logical devices or groups configured."));
		return (HXGE_ERROR);
	}
	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL) {
		/* First call: allocate the group and device arrays. */
		ldgvp = KMEM_ZALLOC(sizeof (hxge_ldgv_t), KM_SLEEP);
		hxgep->ldgvp = ldgvp;
		ldgvp->maxldgs = maxldgs;
		ldgvp->maxldvs = maxldvs;
		ldgp = ldgvp->ldgp =
		    KMEM_ZALLOC(sizeof (hxge_ldg_t) * maxldgs, KM_SLEEP);
		ldvp = ldgvp->ldvp =
		    KMEM_ZALLOC(sizeof (hxge_ldv_t) * maxldvs, KM_SLEEP);
	}
	/*
	 * NOTE(review): if ldgvp was already allocated, ldgp/ldvp are left
	 * uninitialized here yet used below -- presumably this function is
	 * only ever called with hxgep->ldgvp == NULL; confirm with callers.
	 */

	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
	ldgvp->tmres = HXGE_TIMER_RESO;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
	    maxldvs, maxldgs, nldvs));

	/* Initialize every group: armed, default timer, shared handler. */
	ldg = p_cfgp->start_ldg;
	ptr = ldgp;
	for (i = 0; i < maxldgs; i++) {
		ptr->arm = B_TRUE;
		ptr->vldg_index = i;
		ptr->ldg_timer = HXGE_TIMER_LDG;
		ptr->ldg = ldg++;
		ptr->sys_intr_handler = hxge_intr;
		ptr->nldvs = 0;
		ptr->hxgep = hxgep;
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
		    maxldvs, maxldgs, ptr->ldg));
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldv_init: timer %d", ptr->ldg_timer));
		ptr++;
	}

	/* Use no more groups than interrupt vectors available. */
	ldg = p_cfgp->start_ldg;
	if (maxldgs > *navail_p) {
		ngrps = *navail_p;
	} else {
		ngrps = maxldgs;
	}
	endldg = ldg + ngrps;

	/*
	 * Receive DMA channels.
	 */
	channel = p_cfgp->start_rdc;
	start = p_cfgp->start_rdc + HXGE_RDMA_LD_START;
	end = start + p_cfgp->max_rdcs;
	nldvs = 0;
	ldgvp->nldvs = 0;
	ldgp->ldvp = NULL;
	*nrequired_p = 0;
	ptr = ldgp;

	/*
	 * Start with RDC to configure logical devices for each group.
	 * hxge_ldgv_setup advances ptr/ldvp as devices fill groups.
	 */
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_rxdma = B_TRUE;
		ldvp->ldv = ldv;

		/*
		 * If non-seq needs to change the following code
		 */
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_rx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}

	/*
	 * Transmit DMA channels.
	 */
	channel = p_cfgp->start_tdc;
	start = p_cfgp->start_tdc + HXGE_TDMA_LD_START;
	end = start + p_cfgp->max_tdcs;
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_txdma = B_TRUE;
		ldvp->ldv = ldv;
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_tx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}

	/*
	 * VMAC
	 */
	ldvp->is_vmac = B_TRUE;
	ldvp->ldv_intr_handler = hxge_vmac_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_VMAC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * PFC
	 */
	ldvp->is_pfc = B_TRUE;
	ldvp->ldv_intr_handler = hxge_pfc_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_PFC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * System error interrupts.
	 */
	ldv = HXGE_SYS_ERROR_LD;
	ldvp->ldv = ldv;
	ldvp->is_syserr = B_TRUE;
	ldvp->ldv_intr_handler = hxge_syserr_intr;
	ldvp->ldv_ldf_masks = 0;
	ldvp->hxgep = hxgep;
	ldvp->use_timer = B_FALSE;
	ldgvp->ldvp_syserr = ldvp;

	/* Reset PEU error mask to allow PEU error interrupts */
	/*
	 * Keep the msix parity error mask here and remove it
	 * after ddi_intr_enable call to avoid a msix par err
	 */
	parity_err_mask.value = 0;
	parity_err_mask.bits.eic_msix_parerr_mask = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, parity_err_mask.value);

	/*
	 * Unmask the system interrupt states.
	 */
	(void) hxge_fzc_sys_err_mask_set(hxgep, B_FALSE);
	(void) hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	ldgvp->ldg_intrs = *nrequired_p;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_init"));
	return (status);
}
672 672
673 673 hxge_status_t
674 674 hxge_ldgv_uninit(p_hxge_t hxgep)
675 675 {
676 676 p_hxge_ldgv_t ldgvp;
677 677
678 678 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_uninit"));
679 679 ldgvp = hxgep->ldgvp;
680 680 if (ldgvp == NULL) {
681 681 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
682 682 "<== hxge_ldgv_uninit: no logical group configured."));
683 683 return (HXGE_OK);
684 684 }
685 685
686 686 if (ldgvp->ldgp) {
687 687 KMEM_FREE(ldgvp->ldgp, sizeof (hxge_ldg_t) * ldgvp->maxldgs);
688 688 }
689 689 if (ldgvp->ldvp) {
690 690 KMEM_FREE(ldgvp->ldvp, sizeof (hxge_ldv_t) * ldgvp->maxldvs);
691 691 }
692 692
693 693 KMEM_FREE(ldgvp, sizeof (hxge_ldgv_t));
694 694 hxgep->ldgvp = NULL;
695 695
696 696 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_uninit"));
697 697 return (HXGE_OK);
698 698 }
699 699
700 700 hxge_status_t
701 701 hxge_intr_ldgv_init(p_hxge_t hxgep)
702 702 {
703 703 hxge_status_t status = HXGE_OK;
704 704
705 705 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_ldgv_init"));
706 706 /*
707 707 * Configure the logical device group numbers, state vectors
708 708 * and interrupt masks for each logical device.
709 709 */
710 710 status = hxge_fzc_intr_init(hxgep);
711 711
712 712 /*
713 713 * Configure logical device masks and timers.
714 714 */
715 715 status = hxge_intr_mask_mgmt(hxgep);
716 716
717 717 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
718 718 return (status);
719 719 }
720 720
/*
 * Push the per-device LDF masks and the per-group arm/timer settings
 * down to the hardware for every configured interrupt group.
 * Returns HXGE_ERROR | <hpi status> on the first HPI failure.
 */
hxge_status_t
hxge_intr_mask_mgmt(p_hxge_t hxgep)
{
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldg_t ldgp;
	p_hxge_ldv_t ldvp;
	hpi_handle_t handle;
	int i, j;
	hpi_status_t rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_mask_mgmt"));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
	/*
	 * Initialize masks.  ldvp advances continuously across the outer
	 * loop: the device array is laid out group by group.
	 */
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt(Hydra): # intrs %d ", ldgvp->ldg_intrs));
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt(Hydra): # ldv %d in group %d",
		    ldgp->nldvs, ldgp->ldg));
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set ldv # %d "
			    "for ldg %d", ldvp->ldv, ldgp->ldg));
			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "<== hxge_intr_mask_mgmt: set mask failed "
				    " rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set mask OK "
			    " rs 0x%x ldv %d mask 0x%x",
			    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	/* Rewind to the first group before the timer/arm pass. */
	ldgp = ldgvp->ldgp;
	/* Configure timer and arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt: set timer failed "
			    " rs 0x%x dg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt: set timer OK "
		    " rs 0x%x ldg %d timer 0x%x",
		    rs, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_mask_mgmt"));
	return (HXGE_OK);
}
797 797
/*
 * Globally enable (on == B_TRUE) or disable interrupts: clear or set the
 * LDF mask on every logical device, and arm or disarm every group,
 * re-writing the group management register each pass.
 * Returns HXGE_ERROR | <hpi status> on the first HPI failure.
 */
hxge_status_t
hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on)
{
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldg_t ldgp;
	p_hxge_ldv_t ldvp;
	hpi_handle_t handle;
	int i, j;
	hpi_status_t rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt_set (%d)", on));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_intr_mask_mgmt_set: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	/* set masks.  ldvp advances continuously across all groups. */
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: flag %d ldg %d"
		    "set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: "
			    "for %d %d flag %d", i, j, on));
			if (on) {
				/* Enable: clear all LDF mask bits. */
				ldvp->ldv_ldf_masks = 0;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "ON mask off"));
			} else {
				/* Disable: mask every LDF source. */
				ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set:mask on"));
			}

			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "set mask failed rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: flag %d"
			    "set mask OK ldv %d mask 0x%x",
			    on, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	/* Rewind to the first group for the arm/timer pass. */
	ldgp = ldgvp->ldgp;
	/* set the arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		/* Track the requested state (net effect: arm = on). */
		if (on && !ldgp->arm) {
			ldgp->arm = B_TRUE;
		} else if (!on && ldgp->arm) {
			ldgp->arm = B_FALSE;
		}
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt_set: "
			    "set timer failed rs 0x%x ldg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: OK (flag %d) "
		    "set timer ldg %d timer 0x%x",
		    on, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt_set"));
	return (HXGE_OK);
}
887 887
888 888 /*
889 889 * For Big Endian systems, the mac address will be from OBP. For Little
890 890 * Endian (x64) systems, it will be retrieved from the card since it cannot
891 891 * be programmed into PXE.
892 892 * This function also populates the MMAC parameters.
893 893 */
894 894 static hxge_status_t
895 895 hxge_get_mac_addr_properties(p_hxge_t hxgep)
896 896 {
897 897 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_get_mac_addr_properties "));
898 898
899 899 (void) hxge_pfc_mac_addrs_get(hxgep);
900 900 hxgep->ouraddr = hxgep->factaddr;
901 901
902 902 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_get_mac_addr_properties "));
903 903 return (HXGE_OK);
904 904 }
905 905
906 906 static void
907 907 hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
908 908 uint8_t endldg, int *ngrps)
909 909 {
910 910 HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup"));
911 911 /* Assign the group number for each device. */
912 912 (*ldvp)->ldg_assigned = (*ldgp)->ldg;
913 913 (*ldvp)->ldgp = *ldgp;
914 914 (*ldvp)->ldv = ldv;
915 915
916 916 HXGE_DEBUG_MSG((NULL, INT_CTL,
917 917 "==> hxge_ldgv_setup: ldv %d endldg %d ldg %d, ldvp $%p",
918 918 ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
919 919
920 920 (*ldgp)->nldvs++;
921 921 if ((*ldgp)->ldg == (endldg - 1)) {
922 922 if ((*ldgp)->ldvp == NULL) {
923 923 (*ldgp)->ldvp = *ldvp;
924 924 *ngrps += 1;
925 925 HXGE_DEBUG_MSG((NULL, INT_CTL,
926 926 "==> hxge_ldgv_setup: ngrps %d", *ngrps));
927 927 }
↓ open down ↓ |
927 lines elided |
↑ open up ↑ |
928 928 HXGE_DEBUG_MSG((NULL, INT_CTL,
929 929 "==> hxge_ldgv_setup: ldvp $%p ngrps %d",
930 930 *ldvp, *ngrps));
931 931 ++*ldvp;
932 932 } else {
933 933 (*ldgp)->ldvp = *ldvp;
934 934 *ngrps += 1;
935 935 HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup(done): "
936 936 "ldv %d endldg %d ldg %d, ldvp $%p",
937 937 ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
938 - (*ldvp) = ++*ldvp;
939 - (*ldgp) = ++*ldgp;
938 + ++*ldvp;
939 + ++*ldgp;
940 940 HXGE_DEBUG_MSG((NULL, INT_CTL,
941 941 "==> hxge_ldgv_setup: new ngrps %d", *ngrps));
942 942 }
943 943
944 944 HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup: "
945 945 "ldg %d nldvs %d ldv %d ldvp $%p endldg %d ngrps %d",
946 946 (*ldgp)->ldg, (*ldgp)->nldvs, ldv, ldvp, endldg, *ngrps));
947 947
948 948 HXGE_DEBUG_MSG((NULL, INT_CTL, "<== hxge_ldgv_setup"));
949 949 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX