8368 remove warlock leftovers from usr/src/uts
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_rsrc.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_rsrc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_rsrc.c
28 28 * Hermon Resource Management Routines
29 29 *
30 30 * Implements all the routines necessary for setup, teardown, and
31 31 * alloc/free of all Hermon resources, including those that are managed
32 32 * by Hermon hardware or which live in Hermon's direct attached DDR memory.
33 33 */
34 34
35 35 #include <sys/sysmacros.h>
36 36 #include <sys/types.h>
37 37 #include <sys/conf.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/modctl.h>
41 41 #include <sys/vmem.h>
42 42 #include <sys/bitmap.h>
43 43
44 44 #include <sys/ib/adapters/hermon/hermon.h>
45 45
46 46 int hermon_rsrc_verbose = 0;
47 47
48 48 /*
49 49 * The following routines are used for initializing and destroying
50 50 * the resource pools used by the Hermon resource allocation routines.
51 51 * They consist of four classes of object:
52 52 *
53 53 * Mailboxes: The "In" and "Out" mailbox types are used by the Hermon
54 54 * command interface routines. Mailboxes are used to pass information
55 55 * back and forth to the Hermon firmware. Either type of mailbox may
56 56 * be allocated from Hermon's direct attached DDR memory or from system
57 57 * memory (although currently all "In" mailboxes are in DDR and all "out"
58 58 * mailboxes come from system memory).
59 59 *
60 60 * HW entry objects: These objects represent resources required by the Hermon
61 61 * hardware. These objects include things like Queue Pair contexts (QPC),
62 62 * Completion Queue contexts (CQC), Event Queue contexts (EQC), RDB (for
63 63 * supporting RDMA Read/Atomic), Multicast Group entries (MCG), Memory
64 64 * Protection Table entries (MPT), Memory Translation Table entries (MTT).
65 65 *
66 66 * What these objects all have in common is that they are each required
67 67 * to come from ICM memory, they are always allocated from tables, and
68 68 * they are not to be directly accessed (read or written) by driver
69 69 * software (Mellanox FMR access to MPT is an exception).
70 70 * The other notable exceptions are the UAR pages (UAR_PG) which are
71 71 * allocated from the UAR address space rather than DDR, and the UD
72 72 * address vectors (UDAV) which are similar to the common object types
73 73 * with the major difference being that UDAVs _are_ directly read and
74 74 * written by driver software.
75 75 *
76 76 * SW handle objects: These objects represent resources required by Hermon
77 77 * driver software. They are primarily software tracking structures,
78 78 * which are allocated from system memory (using kmem_cache). Several of
79 79 * the objects have both a "constructor" and "destructor" method
80 80 * associated with them (see below).
81 81 *
82 82 * Protection Domain (PD) handle objects: These objects are very much like
83 83 * a SW handle object with the notable difference that all PD handle
84 84 * objects have an actual Protection Domain number (PD) associated with
85 85 * them (and the PD number is allocated/managed through a separate
86 86 * vmem_arena specifically set aside for this purpose).
87 87 */
88 88
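The fragment below is illustrative only and is not part of this changeset; it sketches how a caller typically pairs hermon_rsrc_alloc() with hermon_rsrc_free() for one of the object classes described above (assuming the usual hermon.h definitions are in scope, with error handling abbreviated):

static int
example_cqc_user(hermon_state_t *state)
{
	hermon_rsrc_t	*rsrc = NULL;

	/* Allocate a single CQ context entry, sleeping if necessary */
	if (hermon_rsrc_alloc(state, HERMON_CQC, 1, HERMON_SLEEP,
	    &rsrc) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* ... use rsrc->hr_indx to program the CQ context ... */

	/* Release the entry; hermon_rsrc_free() also sets rsrc back to NULL */
	hermon_rsrc_free(state, &rsrc);
	return (DDI_SUCCESS);
}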
89 89 static int hermon_rsrc_mbox_init(hermon_state_t *state,
90 90 hermon_rsrc_mbox_info_t *info);
91 91 static void hermon_rsrc_mbox_fini(hermon_state_t *state,
92 92 hermon_rsrc_mbox_info_t *info);
93 93
94 94 static int hermon_rsrc_sw_handles_init(hermon_state_t *state,
95 95 hermon_rsrc_sw_hdl_info_t *info);
96 96 static void hermon_rsrc_sw_handles_fini(hermon_state_t *state,
97 97 hermon_rsrc_sw_hdl_info_t *info);
98 98
99 99 static int hermon_rsrc_pd_handles_init(hermon_state_t *state,
100 100 hermon_rsrc_sw_hdl_info_t *info);
101 101 static void hermon_rsrc_pd_handles_fini(hermon_state_t *state,
102 102 hermon_rsrc_sw_hdl_info_t *info);
103 103
104 104 /*
105 105 * The following routines are used for allocating and freeing the specific
106 106 * types of objects described above from their associated resource pools.
107 107 */
108 108 static int hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info,
109 109 uint_t num, hermon_rsrc_t *hdl);
110 110 static void hermon_rsrc_mbox_free(hermon_rsrc_t *hdl);
111 111
112 112 static int hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info,
113 113 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
114 114 static void hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
115 115 hermon_rsrc_t *hdl);
116 116 static int hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info,
117 117 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
118 118
119 119 static int hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info,
120 120 uint_t num, hermon_rsrc_t *hdl, int num_to_hdl);
121 121 static int hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
122 122 hermon_rsrc_t *hdl, int num_to_hdl);
123 123
124 124 static int hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
125 125 uint_t sleepflag, hermon_rsrc_t *hdl);
126 126 static void hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info,
127 127 hermon_rsrc_t *hdl);
128 128
129 129 static int hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
130 130 uint_t sleepflag, hermon_rsrc_t *hdl);
131 131 static void hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info,
132 132 hermon_rsrc_t *hdl);
133 133
134 134 static int hermon_rsrc_fexch_alloc(hermon_state_t *state,
135 135 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
136 136 static void hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl);
137 137 static int hermon_rsrc_rfci_alloc(hermon_state_t *state,
138 138 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
139 139 static void hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl);
140 140
141 141 /*
142 142 * The following routines are the constructors and destructors for several
143 143 * of the SW handle type objects. For certain types of SW handle objects
144 144 * (all of which are implemented using kmem_cache), we need to do some
145 145 * special field initialization (specifically, mutex_init/destroy). These
146 146 * routines enable that init and teardown.
147 147 */
148 148 static int hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags);
149 149 static void hermon_rsrc_pdhdl_destructor(void *pd, void *state);
150 150 static int hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags);
151 151 static void hermon_rsrc_cqhdl_destructor(void *cq, void *state);
152 152 static int hermon_rsrc_qphdl_constructor(void *cq, void *priv, int flags);
153 153 static void hermon_rsrc_qphdl_destructor(void *cq, void *state);
154 154 static int hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags);
155 155 static void hermon_rsrc_srqhdl_destructor(void *srq, void *state);
156 156 static int hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags);
157 157 static void hermon_rsrc_refcnt_destructor(void *rc, void *state);
158 158 static int hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags);
159 159 static void hermon_rsrc_ahhdl_destructor(void *ah, void *state);
160 160 static int hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags);
161 161 static void hermon_rsrc_mrhdl_destructor(void *mr, void *state);
162 162
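As a rough illustration of the constructor/destructor pattern described above (these are placeholders, not the driver's actual routines), such a pair usually exists only to run mutex_init() and mutex_destroy() on a lock embedded in the cached object; the driver's real constructors may also pass an interrupt priority as the final mutex_init() argument:

struct example_hdl {
	kmutex_t	hdl_lock;
	/* ... other per-handle fields ... */
};

static int
example_hdl_constructor(void *buf, void *priv, int flags)
{
	struct example_hdl	*hdl = buf;

	mutex_init(&hdl->hdl_lock, NULL, MUTEX_DRIVER, NULL);
	return (0);
}

static void
example_hdl_destructor(void *buf, void *priv)
{
	struct example_hdl	*hdl = buf;

	mutex_destroy(&hdl->hdl_lock);
}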
163 163 /*
164 164 * Special routine to calculate and return the size of an MCG object based
165 165 * on current driver configuration (specifically, the number of QPs per MCG
166 166 * that has been configured).
167 167 */
168 168 static int hermon_rsrc_mcg_entry_get_size(hermon_state_t *state,
169 169 uint_t *mcg_size_shift);
170 170
171 171
172 172 /*
173 173 * hermon_rsrc_alloc()
174 174 *
175 175 * Context: Can be called from interrupt or base context.
176 176 * The "sleepflag" parameter is used by all object allocators to
177 177 * determine whether to SLEEP for resources or not.
178 178 */
179 179 int
180 180 hermon_rsrc_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
181 181 uint_t sleepflag, hermon_rsrc_t **hdl)
182 182 {
183 183 hermon_rsrc_pool_info_t *rsrc_pool;
184 184 hermon_rsrc_t *tmp_rsrc_hdl;
185 185 int flag, status = DDI_FAILURE;
186 186
187 187 ASSERT(state != NULL);
188 188 ASSERT(hdl != NULL);
189 189
190 190 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
(190 lines elided)
191 191 ASSERT(rsrc_pool != NULL);
192 192
193 193 /*
194 194 * Allocate space for the object used to track the resource handle
195 195 */
196 196 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
197 197 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
198 198 if (tmp_rsrc_hdl == NULL) {
199 199 return (DDI_FAILURE);
200 200 }
201 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
202 201
203 202 /*
204 203 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
205 204 * to know what type of resource is being freed.
206 205 */
207 206 tmp_rsrc_hdl->rsrc_type = rsrc;
208 207
209 208 /*
210 209 * Depending on resource type, call the appropriate alloc routine
211 210 */
212 211 switch (rsrc) {
213 212 case HERMON_IN_MBOX:
214 213 case HERMON_OUT_MBOX:
215 214 case HERMON_INTR_IN_MBOX:
216 215 case HERMON_INTR_OUT_MBOX:
217 216 status = hermon_rsrc_mbox_alloc(rsrc_pool, num, tmp_rsrc_hdl);
218 217 break;
219 218
220 219 case HERMON_DMPT:
221 220 /* Allocate "num" (contiguous/aligned for FEXCH) DMPTs */
222 221 case HERMON_QPC:
223 222 /* Allocate "num" (contiguous/aligned for RSS) QPCs */
224 223 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num,
225 224 sleepflag, tmp_rsrc_hdl);
226 225 break;
227 226
228 227 case HERMON_QPC_FEXCH_PORT1:
229 228 case HERMON_QPC_FEXCH_PORT2:
230 229 /* Allocate "num" contiguous/aligned QPCs for FEXCH */
231 230 status = hermon_rsrc_fexch_alloc(state, rsrc, num,
232 231 sleepflag, tmp_rsrc_hdl);
233 232 break;
234 233
235 234 case HERMON_QPC_RFCI_PORT1:
236 235 case HERMON_QPC_RFCI_PORT2:
237 236 /* Allocate "num" contiguous/aligned QPCs for RFCI */
238 237 status = hermon_rsrc_rfci_alloc(state, rsrc, num,
239 238 sleepflag, tmp_rsrc_hdl);
240 239 break;
241 240
242 241 case HERMON_MTT:
243 242 case HERMON_CQC:
244 243 case HERMON_SRQC:
245 244 case HERMON_EQC:
246 245 case HERMON_MCG:
247 246 case HERMON_UARPG:
248 247 /* Allocate "num" unaligned resources */
249 248 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1,
250 249 sleepflag, tmp_rsrc_hdl);
251 250 break;
252 251
253 252 case HERMON_MRHDL:
254 253 case HERMON_EQHDL:
255 254 case HERMON_CQHDL:
256 255 case HERMON_SRQHDL:
257 256 case HERMON_AHHDL:
258 257 case HERMON_QPHDL:
259 258 case HERMON_REFCNT:
260 259 status = hermon_rsrc_swhdl_alloc(rsrc_pool, sleepflag,
261 260 tmp_rsrc_hdl);
262 261 break;
263 262
264 263 case HERMON_PDHDL:
265 264 status = hermon_rsrc_pdhdl_alloc(rsrc_pool, sleepflag,
266 265 tmp_rsrc_hdl);
267 266 break;
268 267
269 268 case HERMON_RDB: /* handled during HERMON_QPC */
270 269 case HERMON_ALTC: /* handled during HERMON_QPC */
271 270 case HERMON_AUXC: /* handled during HERMON_QPC */
272 271 case HERMON_CMPT_QPC: /* handled during HERMON_QPC */
273 272 case HERMON_CMPT_SRQC: /* handled during HERMON_SRQC */
274 273 case HERMON_CMPT_CQC: /* handled during HERMON_CQC */
275 274 case HERMON_CMPT_EQC: /* handled during HERMON_EQC */
276 275 default:
277 276 HERMON_WARNING(state, "unexpected resource type in alloc ");
278 277 cmn_err(CE_WARN, "Resource type %x \n", rsrc_pool->rsrc_type);
279 278 break;
280 279 }
281 280
282 281 /*
283 282 * If the resource allocation failed, then free the special resource
284 283 * tracking structure and return failure. Otherwise return the
285 284 * handle for the resource tracking structure.
286 285 */
287 286 if (status != DDI_SUCCESS) {
288 287 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
289 288 return (DDI_FAILURE);
290 289 } else {
291 290 *hdl = tmp_rsrc_hdl;
292 291 return (DDI_SUCCESS);
293 292 }
294 293 }
295 294
296 295
297 296 /*
298 297 * hermon_rsrc_reserve()
299 298 *
300 299 * Context: Can only be called from attach.
301 300 * The "sleepflag" parameter is used by all object allocators to
302 301 * determine whether to SLEEP for resources or not.
303 302 */
304 303 int
305 304 hermon_rsrc_reserve(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
306 305 uint_t sleepflag, hermon_rsrc_t **hdl)
307 306 {
308 307 hermon_rsrc_pool_info_t *rsrc_pool;
309 308 hermon_rsrc_t *tmp_rsrc_hdl;
310 309 int flag, status = DDI_FAILURE;
311 310
312 311 ASSERT(state != NULL);
313 312 ASSERT(hdl != NULL);
314 313
315 314 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
(104 lines elided)
316 315 ASSERT(rsrc_pool != NULL);
317 316
318 317 /*
319 318 * Allocate space for the object used to track the resource handle
320 319 */
321 320 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
322 321 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
323 322 if (tmp_rsrc_hdl == NULL) {
324 323 return (DDI_FAILURE);
325 324 }
326 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
327 325
328 326 /*
329 327 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
330 328 * to know what type of resource is being freed.
331 329 */
332 330 tmp_rsrc_hdl->rsrc_type = rsrc;
333 331
334 332 switch (rsrc) {
335 333 case HERMON_QPC:
336 334 case HERMON_DMPT:
337 335 case HERMON_MTT:
338 336 /*
339 337 * Reserve num resources, naturally aligned (N * num).
340 338 */
341 339 status = hermon_rsrc_hw_entry_reserve(rsrc_pool, num, num,
342 340 sleepflag, tmp_rsrc_hdl);
343 341 break;
344 342
345 343 default:
346 344 HERMON_WARNING(state, "unexpected resource type in reserve ");
347 345 cmn_err(CE_WARN, "Resource type %x \n", rsrc);
348 346 break;
349 347 }
350 348
351 349 /*
352 350 * If the resource allocation failed, then free the special resource
353 351 * tracking structure and return failure. Otherwise return the
354 352 * handle for the resource tracking structure.
355 353 */
356 354 if (status != DDI_SUCCESS) {
357 355 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
358 356 return (DDI_FAILURE);
359 357 } else {
360 358 *hdl = tmp_rsrc_hdl;
361 359 return (DDI_SUCCESS);
362 360 }
363 361 }
364 362
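In the reservation above, "naturally aligned (N * num)" means the returned starting index is a multiple of the requested count, since hermon_rsrc_hw_entry_reserve() is passed num as both the count and the alignment; with purely hypothetical numbers, reserving 8 QPC entries would yield a base index of 0, 8, 16, and so on, never 4.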
365 363
366 364 /*
367 365 * hermon_rsrc_fexch_alloc()
368 366 *
369 367 * Context: Can only be called from base context.
370 368 * The "sleepflag" parameter is used by all object allocators to
371 369 * determine whether to SLEEP for resources or not.
372 370 */
373 371 static int
374 372 hermon_rsrc_fexch_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
375 373 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
376 374 {
377 375 hermon_fcoib_t *fcoib;
378 376 void *addr;
379 377 uint32_t fexch_qpn_base;
380 378 hermon_rsrc_pool_info_t *qpc_pool, *mpt_pool, *mtt_pool;
381 379 int flag, status;
382 380 hermon_rsrc_t mpt_hdl; /* temporary, just for icm_confirm */
383 381 hermon_rsrc_t mtt_hdl; /* temporary, just for icm_confirm */
384 382 uint_t portm1; /* hca_port_number - 1 */
385 383 uint_t nummtt;
386 384 vmem_t *vmp;
387 385
388 386 ASSERT(state != NULL);
389 387 ASSERT(hdl != NULL);
390 388
391 389 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
392 390 return (DDI_FAILURE);
393 391
394 392 portm1 = rsrc - HERMON_QPC_FEXCH_PORT1;
395 393 fcoib = &state->hs_fcoib;
396 394 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
397 395
398 396 /* Allocate from the FEXCH QP range */
399 397 vmp = fcoib->hfc_fexch_vmemp[portm1];
400 398 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
401 399 if (addr == NULL) {
402 400 return (DDI_FAILURE);
403 401 }
404 402 fexch_qpn_base = (uint32_t)((uintptr_t)addr -
405 403 fcoib->hfc_vmemstart + fcoib->hfc_fexch_base[portm1]);
406 404
407 405 /* ICM confirm for the FEXCH QP range */
408 406 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
409 407 hdl->hr_len = num << qpc_pool->rsrc_shift;
410 408 hdl->hr_addr = addr; /* used only for vmem_xfree */
411 409 hdl->hr_indx = fexch_qpn_base;
412 410
413 411 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
414 412 if (status != DDI_SUCCESS) {
415 413 vmem_xfree(vmp, addr, num);
416 414 return (DDI_FAILURE);
417 415 }
418 416
419 417 /* ICM confirm for the Primary MKEYs (client side only) */
420 418 mpt_pool = &state->hs_rsrc_hdl[HERMON_DMPT];
421 419 mpt_hdl.hr_len = num << mpt_pool->rsrc_shift;
422 420 mpt_hdl.hr_addr = NULL;
423 421 mpt_hdl.hr_indx = fcoib->hfc_mpt_base[portm1] +
424 422 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]);
425 423
426 424 status = hermon_rsrc_hw_entry_icm_confirm(mpt_pool, num, &mpt_hdl, 0);
427 425 if (status != DDI_SUCCESS) {
428 426 status = hermon_rsrc_hw_entry_icm_free(qpc_pool, hdl, 1);
429 427 vmem_xfree(vmp, addr, num);
430 428 return (DDI_FAILURE);
431 429 }
432 430
433 431 /* ICM confirm for the MTTs of the Primary MKEYs (client side only) */
434 432 nummtt = fcoib->hfc_mtts_per_mpt;
435 433 num *= nummtt;
436 434 mtt_pool = &state->hs_rsrc_hdl[HERMON_MTT];
437 435 mtt_hdl.hr_len = num << mtt_pool->rsrc_shift;
438 436 mtt_hdl.hr_addr = NULL;
439 437 mtt_hdl.hr_indx = fcoib->hfc_mtt_base[portm1] +
440 438 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]) *
441 439 nummtt;
442 440
443 441 status = hermon_rsrc_hw_entry_icm_confirm(mtt_pool, num, &mtt_hdl, 0);
444 442 if (status != DDI_SUCCESS) {
445 443 vmem_xfree(vmp, addr, num);
446 444 return (DDI_FAILURE);
447 445 }
448 446 return (DDI_SUCCESS);
449 447 }
450 448
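A worked example of the index arithmetic above, using purely hypothetical values: if hfc_vmemstart were 0x1000, hfc_fexch_base[portm1] were 0x10000, and vmem_xalloc() returned 0x1040, then fexch_qpn_base would be 0x10040; the matching dMPT index becomes hfc_mpt_base[portm1] + 0x40, and the first of its MTT entries sits at hfc_mtt_base[portm1] + 0x40 * hfc_mtts_per_mpt.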
451 449 static void
452 450 hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl)
453 451 {
454 452 hermon_fcoib_t *fcoib;
455 453 uint_t portm1; /* hca_port_number - 1 */
456 454
457 455 ASSERT(state != NULL);
458 456 ASSERT(hdl != NULL);
459 457
460 458 portm1 = hdl->rsrc_type - HERMON_QPC_FEXCH_PORT1;
461 459 fcoib = &state->hs_fcoib;
462 460 vmem_xfree(fcoib->hfc_fexch_vmemp[portm1], hdl->hr_addr,
463 461 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
464 462 }
465 463
466 464 /*
467 465 * hermon_rsrc_rfci_alloc()
468 466 *
469 467 * Context: Can only be called from base context.
470 468 * The "sleepflag" parameter is used by all object allocators to
471 469 * determine whether to SLEEP for resources or not.
472 470 */
473 471 static int
474 472 hermon_rsrc_rfci_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
475 473 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
476 474 {
477 475 hermon_fcoib_t *fcoib;
478 476 void *addr;
479 477 uint32_t rfci_qpn_base;
480 478 hermon_rsrc_pool_info_t *qpc_pool;
481 479 int flag, status;
482 480 uint_t portm1; /* hca_port_number - 1 */
483 481 vmem_t *vmp;
484 482
485 483 ASSERT(state != NULL);
486 484 ASSERT(hdl != NULL);
487 485
488 486 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
489 487 return (DDI_FAILURE);
490 488
491 489 portm1 = rsrc - HERMON_QPC_RFCI_PORT1;
492 490 fcoib = &state->hs_fcoib;
493 491 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
494 492
495 493 /* Allocate from the RFCI QP range */
496 494 vmp = fcoib->hfc_rfci_vmemp[portm1];
497 495 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
498 496 if (addr == NULL) {
499 497 return (DDI_FAILURE);
500 498 }
501 499 rfci_qpn_base = (uint32_t)((uintptr_t)addr -
502 500 fcoib->hfc_vmemstart + fcoib->hfc_rfci_base[portm1]);
503 501
504 502 /* ICM confirm for the RFCI QP */
505 503 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
506 504 hdl->hr_len = num << qpc_pool->rsrc_shift;
507 505 hdl->hr_addr = addr; /* used only for vmem_xfree */
508 506 hdl->hr_indx = rfci_qpn_base;
509 507
510 508 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
511 509 if (status != DDI_SUCCESS) {
512 510 vmem_xfree(vmp, addr, num);
513 511 return (DDI_FAILURE);
514 512 }
515 513 return (DDI_SUCCESS);
516 514 }
517 515
518 516 static void
519 517 hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl)
520 518 {
521 519 hermon_fcoib_t *fcoib;
522 520 uint_t portm1; /* hca_port_number - 1 */
523 521
524 522 ASSERT(state != NULL);
525 523 ASSERT(hdl != NULL);
526 524
527 525 portm1 = hdl->rsrc_type - HERMON_QPC_RFCI_PORT1;
528 526 fcoib = &state->hs_fcoib;
529 527 vmem_xfree(fcoib->hfc_rfci_vmemp[portm1], hdl->hr_addr,
530 528 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
531 529 }
532 530
533 531
534 532 /*
535 533 * hermon_rsrc_free()
536 534 * Context: Can be called from interrupt or base context.
537 535 */
538 536 void
539 537 hermon_rsrc_free(hermon_state_t *state, hermon_rsrc_t **hdl)
540 538 {
541 539 hermon_rsrc_pool_info_t *rsrc_pool;
542 540
543 541 ASSERT(state != NULL);
544 542 ASSERT(hdl != NULL);
545 543
546 544 rsrc_pool = &state->hs_rsrc_hdl[(*hdl)->rsrc_type];
547 545 ASSERT(rsrc_pool != NULL);
548 546
549 547 /*
550 548 * Depending on resource type, call the appropriate free routine
551 549 */
552 550 switch (rsrc_pool->rsrc_type) {
553 551 case HERMON_IN_MBOX:
554 552 case HERMON_OUT_MBOX:
555 553 case HERMON_INTR_IN_MBOX:
556 554 case HERMON_INTR_OUT_MBOX:
557 555 hermon_rsrc_mbox_free(*hdl);
558 556 break;
559 557
560 558 case HERMON_QPC_FEXCH_PORT1:
561 559 case HERMON_QPC_FEXCH_PORT2:
562 560 hermon_rsrc_fexch_free(state, *hdl);
563 561 break;
564 562
565 563 case HERMON_QPC_RFCI_PORT1:
566 564 case HERMON_QPC_RFCI_PORT2:
567 565 hermon_rsrc_rfci_free(state, *hdl);
568 566 break;
569 567
570 568 case HERMON_QPC:
571 569 case HERMON_CQC:
572 570 case HERMON_SRQC:
573 571 case HERMON_EQC:
574 572 case HERMON_DMPT:
575 573 case HERMON_MCG:
576 574 case HERMON_MTT:
577 575 case HERMON_UARPG:
578 576 hermon_rsrc_hw_entry_free(rsrc_pool, *hdl);
579 577 break;
580 578
581 579 case HERMON_MRHDL:
582 580 case HERMON_EQHDL:
583 581 case HERMON_CQHDL:
584 582 case HERMON_SRQHDL:
585 583 case HERMON_AHHDL:
586 584 case HERMON_QPHDL:
587 585 case HERMON_REFCNT:
588 586 hermon_rsrc_swhdl_free(rsrc_pool, *hdl);
589 587 break;
590 588
591 589 case HERMON_PDHDL:
592 590 hermon_rsrc_pdhdl_free(rsrc_pool, *hdl);
593 591 break;
594 592
595 593 case HERMON_RDB:
596 594 case HERMON_ALTC:
597 595 case HERMON_AUXC:
598 596 case HERMON_CMPT_QPC:
599 597 case HERMON_CMPT_SRQC:
600 598 case HERMON_CMPT_CQC:
601 599 case HERMON_CMPT_EQC:
602 600 default:
603 601 cmn_err(CE_CONT, "!rsrc_type = 0x%x\n", rsrc_pool->rsrc_type);
604 602 break;
605 603 }
606 604
607 605 /*
608 606 * Free the special resource tracking structure, set the handle to
609 607 * NULL, and return.
610 608 */
611 609 kmem_cache_free(state->hs_rsrc_cache, *hdl);
612 610 *hdl = NULL;
613 611 }
614 612
615 613
616 614 /*
617 615 * hermon_rsrc_init_phase1()
618 616 *
619 617 * Completes the first phase of Hermon resource/configuration init.
620 618 * This involves creating the kmem_cache for the "hermon_rsrc_t"
621 619 * structs, allocating the space for the resource pool handles,
622 620 * and setting up the "Out" mailboxes.
623 621 *
624 622 * When this function completes, the Hermon driver is ready to
625 623 * post the following commands which return information only in the
626 624 * "Out" mailbox: QUERY_DDR, QUERY_FW, QUERY_DEV_LIM, and QUERY_ADAPTER
627 625 * If any of these commands are to be posted at this time, this must be
628 626 * done only when "spinning" (as the outstanding command list and
629 627 * EQ setup code has not yet run).
630 628 *
631 629 * Context: Only called from attach() path context
632 630 */
633 631 int
634 632 hermon_rsrc_init_phase1(hermon_state_t *state)
635 633 {
636 634 hermon_rsrc_pool_info_t *rsrc_pool;
637 635 hermon_rsrc_mbox_info_t mbox_info;
638 636 hermon_rsrc_cleanup_level_t cleanup;
639 637 hermon_cfg_profile_t *cfgprof;
640 638 uint64_t num, size;
641 639 int status;
642 640 char *rsrc_name;
643 641
644 642 ASSERT(state != NULL);
645 643
646 644 /* This is where Phase 1 of resource initialization begins */
647 645 cleanup = HERMON_RSRC_CLEANUP_LEVEL0;
648 646
649 647 /* Build kmem cache name from Hermon instance */
650 648 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
651 649 HERMON_RSRC_NAME(rsrc_name, HERMON_RSRC_CACHE);
652 650
653 651 /*
654 652 * Create the kmem_cache for "hermon_rsrc_t" structures
655 653 * (kmem_cache_create will SLEEP until successful)
656 654 */
657 655 state->hs_rsrc_cache = kmem_cache_create(rsrc_name,
658 656 sizeof (hermon_rsrc_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
659 657
660 658 /*
661 659 * Allocate an array of hermon_rsrc_pool_info_t's (used in all
662 660 * subsequent resource allocations)
663 661 */
664 662 state->hs_rsrc_hdl = kmem_zalloc(HERMON_NUM_RESOURCES *
665 663 sizeof (hermon_rsrc_pool_info_t), KM_SLEEP);
666 664
667 665 /* Pull in the configuration profile */
668 666 cfgprof = state->hs_cfg_profile;
669 667
670 668 /* Initialize the resource pool for "out" mailboxes */
671 669 num = ((uint64_t)1 << cfgprof->cp_log_num_outmbox);
672 670 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
673 671 rsrc_pool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
674 672 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
675 673 rsrc_pool->rsrc_pool_size = (size * num);
676 674 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
677 675 rsrc_pool->rsrc_quantum = (uint_t)size;
678 676 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
679 677 rsrc_pool->rsrc_state = state;
680 678 mbox_info.mbi_num = num;
681 679 mbox_info.mbi_size = size;
682 680 mbox_info.mbi_rsrcpool = rsrc_pool;
683 681 status = hermon_rsrc_mbox_init(state, &mbox_info);
684 682 if (status != DDI_SUCCESS) {
685 683 hermon_rsrc_fini(state, cleanup);
686 684 status = DDI_FAILURE;
687 685 goto rsrcinitp1_fail;
688 686 }
689 687 cleanup = HERMON_RSRC_CLEANUP_LEVEL1;
690 688
691 689 /* Initialize the mailbox list */
692 690 status = hermon_outmbox_list_init(state);
693 691 if (status != DDI_SUCCESS) {
694 692 hermon_rsrc_fini(state, cleanup);
695 693 status = DDI_FAILURE;
696 694 goto rsrcinitp1_fail;
697 695 }
698 696 cleanup = HERMON_RSRC_CLEANUP_LEVEL2;
699 697
700 698 /* Initialize the resource pool for "interrupt out" mailboxes */
701 699 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_outmbox);
702 700 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
703 701 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
704 702 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
705 703 rsrc_pool->rsrc_pool_size = (size * num);
706 704 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
707 705 rsrc_pool->rsrc_quantum = (uint_t)size;
708 706 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
709 707 rsrc_pool->rsrc_state = state;
710 708 mbox_info.mbi_num = num;
711 709 mbox_info.mbi_size = size;
712 710 mbox_info.mbi_rsrcpool = rsrc_pool;
713 711 status = hermon_rsrc_mbox_init(state, &mbox_info);
714 712 if (status != DDI_SUCCESS) {
715 713 hermon_rsrc_fini(state, cleanup);
716 714 status = DDI_FAILURE;
717 715 goto rsrcinitp1_fail;
718 716 }
719 717 cleanup = HERMON_RSRC_CLEANUP_LEVEL3;
720 718
721 719 /* Initialize the mailbox list */
722 720 status = hermon_intr_outmbox_list_init(state);
723 721 if (status != DDI_SUCCESS) {
724 722 hermon_rsrc_fini(state, cleanup);
725 723 status = DDI_FAILURE;
726 724 goto rsrcinitp1_fail;
727 725 }
728 726 cleanup = HERMON_RSRC_CLEANUP_LEVEL4;
729 727
730 728 /* Initialize the resource pool for "in" mailboxes */
731 729 num = ((uint64_t)1 << cfgprof->cp_log_num_inmbox);
732 730 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
733 731 rsrc_pool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
734 732 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
735 733 rsrc_pool->rsrc_pool_size = (size * num);
736 734 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
737 735 rsrc_pool->rsrc_quantum = (uint_t)size;
738 736 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
739 737 rsrc_pool->rsrc_state = state;
740 738 mbox_info.mbi_num = num;
741 739 mbox_info.mbi_size = size;
742 740 mbox_info.mbi_rsrcpool = rsrc_pool;
743 741 status = hermon_rsrc_mbox_init(state, &mbox_info);
744 742 if (status != DDI_SUCCESS) {
745 743 hermon_rsrc_fini(state, cleanup);
746 744 status = DDI_FAILURE;
747 745 goto rsrcinitp1_fail;
748 746 }
749 747 cleanup = HERMON_RSRC_CLEANUP_LEVEL5;
750 748
751 749 /* Initialize the mailbox list */
752 750 status = hermon_inmbox_list_init(state);
753 751 if (status != DDI_SUCCESS) {
754 752 hermon_rsrc_fini(state, cleanup);
755 753 status = DDI_FAILURE;
756 754 goto rsrcinitp1_fail;
757 755 }
758 756 cleanup = HERMON_RSRC_CLEANUP_LEVEL6;
759 757
760 758 /* Initialize the resource pool for "interrupt in" mailboxes */
761 759 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_inmbox);
762 760 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
763 761 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
764 762 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
765 763 rsrc_pool->rsrc_pool_size = (size * num);
766 764 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
767 765 rsrc_pool->rsrc_quantum = (uint_t)size;
768 766 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
769 767 rsrc_pool->rsrc_state = state;
770 768 mbox_info.mbi_num = num;
771 769 mbox_info.mbi_size = size;
772 770 mbox_info.mbi_rsrcpool = rsrc_pool;
773 771 status = hermon_rsrc_mbox_init(state, &mbox_info);
774 772 if (status != DDI_SUCCESS) {
775 773 hermon_rsrc_fini(state, cleanup);
776 774 status = DDI_FAILURE;
777 775 goto rsrcinitp1_fail;
778 776 }
779 777 cleanup = HERMON_RSRC_CLEANUP_LEVEL7;
780 778
781 779 /* Initialize the mailbox list */
782 780 status = hermon_intr_inmbox_list_init(state);
783 781 if (status != DDI_SUCCESS) {
784 782 hermon_rsrc_fini(state, cleanup);
785 783 status = DDI_FAILURE;
786 784 goto rsrcinitp1_fail;
787 785 }
788 786 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
789 787 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
790 788 return (DDI_SUCCESS);
791 789
792 790 rsrcinitp1_fail:
793 791 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
794 792 return (status);
795 793 }
796 794
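As a worked example of the mailbox pool sizing used throughout the routine above, with hypothetical profile values of cp_log_num_outmbox = 8 and cp_log_outmbox_size = 12, the "Out" mailbox pool would hold 256 mailboxes of 4096 bytes each, giving rsrc_pool_size = 1 MB, rsrc_quantum = 4096, and rsrc_shift = 12 (the shift that converts a mailbox index into a byte offset).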
797 795
798 796 /*
799 797 * hermon_rsrc_init_phase2()
800 798 * Context: Only called from attach() path context
801 799 */
802 800 int
803 801 hermon_rsrc_init_phase2(hermon_state_t *state)
804 802 {
805 803 hermon_rsrc_sw_hdl_info_t hdl_info;
806 804 hermon_rsrc_hw_entry_info_t entry_info;
807 805 hermon_rsrc_pool_info_t *rsrc_pool;
808 806 hermon_rsrc_cleanup_level_t cleanup, ncleanup;
809 807 hermon_cfg_profile_t *cfgprof;
810 808 hermon_hw_querydevlim_t *devlim;
811 809 uint64_t num, max, num_prealloc;
812 810 uint_t mcg_size, mcg_size_shift;
813 811 int i, status;
814 812 char *rsrc_name;
815 813
816 814 ASSERT(state != NULL);
817 815
818 816 /* Phase 2 initialization begins where Phase 1 left off */
819 817 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
820 818
821 819 /* Allocate the ICM resource name space */
822 820
823 821 /* Build the ICM vmem arena names from Hermon instance */
824 822 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
825 823
826 824 /*
827 825 * Initialize the resource pools for all objects that exist in
828 826 * context memory (ICM). The ICM consists of context tables, each
829 827 * type of resource (QP, CQ, EQ, etc) having its own context table
830 828 * (QPC, CQC, EQC, etc...).
831 829 */
832 830 cfgprof = state->hs_cfg_profile;
833 831 devlim = &state->hs_devlim;
834 832
835 833 /*
836 834 * Initialize the resource pools for each of the driver resources.
837 835 * With a few exceptions, these resources fall into the two categories
838 836 * of either hw_entries or sw_entries.
839 837 */
840 838
841 839 /*
842 840 * Initialize the resource pools for ICM (hardware) types first.
843 841 * These resources are managed through vmem arenas, which are
844 842 * created via the rsrc pool initialization routine. Note that,
845 843 * due to further calculations, the MCG resource pool is
846 844 * initialized separately.
847 845 */
848 846 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
849 847
850 848 rsrc_pool = &state->hs_rsrc_hdl[i];
851 849 rsrc_pool->rsrc_type = i;
852 850 rsrc_pool->rsrc_state = state;
853 851
854 852 /* Set the resource-specific attributes */
855 853 switch (i) {
856 854 case HERMON_MTT:
857 855 max = ((uint64_t)1 << devlim->log_max_mtt);
858 856 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_mtt);
859 857 HERMON_RSRC_NAME(rsrc_name, HERMON_MTT_VMEM);
860 858 ncleanup = HERMON_RSRC_CLEANUP_LEVEL9;
861 859 break;
862 860
863 861 case HERMON_DMPT:
864 862 max = ((uint64_t)1 << devlim->log_max_dmpt);
865 863 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_dmpt);
866 864 HERMON_RSRC_NAME(rsrc_name, HERMON_DMPT_VMEM);
867 865 ncleanup = HERMON_RSRC_CLEANUP_LEVEL10;
868 866 break;
869 867
870 868 case HERMON_QPC:
871 869 max = ((uint64_t)1 << devlim->log_max_qp);
872 870 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_qp);
873 871 HERMON_RSRC_NAME(rsrc_name, HERMON_QPC_VMEM);
874 872 ncleanup = HERMON_RSRC_CLEANUP_LEVEL11;
875 873 break;
876 874
877 875 case HERMON_CQC:
878 876 max = ((uint64_t)1 << devlim->log_max_cq);
879 877 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_cq);
880 878 HERMON_RSRC_NAME(rsrc_name, HERMON_CQC_VMEM);
881 879 ncleanup = HERMON_RSRC_CLEANUP_LEVEL13;
882 880 break;
883 881
884 882 case HERMON_SRQC:
885 883 max = ((uint64_t)1 << devlim->log_max_srq);
886 884 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_srq);
887 885 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQC_VMEM);
888 886 ncleanup = HERMON_RSRC_CLEANUP_LEVEL16;
889 887 break;
890 888
891 889 case HERMON_EQC:
892 890 max = ((uint64_t)1 << devlim->log_max_eq);
893 891 num_prealloc = state->hs_rsvd_eqs;
894 892 HERMON_RSRC_NAME(rsrc_name, HERMON_EQC_VMEM);
895 893 ncleanup = HERMON_RSRC_CLEANUP_LEVEL18;
896 894 break;
897 895
898 896 case HERMON_MCG: /* handled below */
899 897 case HERMON_AUXC:
900 898 case HERMON_ALTC:
901 899 case HERMON_RDB:
902 900 case HERMON_CMPT_QPC:
903 901 case HERMON_CMPT_SRQC:
904 902 case HERMON_CMPT_CQC:
905 903 case HERMON_CMPT_EQC:
906 904 default:
907 905 /* We don't need to initialize this rsrc here. */
908 906 continue;
909 907 }
910 908
911 909 /* Set the common values for all resource pools */
912 910 rsrc_pool->rsrc_state = state;
913 911 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
914 912 rsrc_pool->rsrc_pool_size = state->hs_icm[i].table_size;
915 913 rsrc_pool->rsrc_align = state->hs_icm[i].table_size;
916 914 rsrc_pool->rsrc_shift = state->hs_icm[i].log_object_size;
917 915 rsrc_pool->rsrc_quantum = state->hs_icm[i].object_size;
918 916
919 917 /* Now, initialize the entry_info and call the init routine */
920 918 entry_info.hwi_num = state->hs_icm[i].num_entries;
921 919 entry_info.hwi_max = max;
922 920 entry_info.hwi_prealloc = num_prealloc;
923 921 entry_info.hwi_rsrcpool = rsrc_pool;
924 922 entry_info.hwi_rsrcname = rsrc_name;
925 923 status = hermon_rsrc_hw_entries_init(state, &entry_info);
926 924 if (status != DDI_SUCCESS) {
927 925 hermon_rsrc_fini(state, cleanup);
928 926 status = DDI_FAILURE;
929 927 goto rsrcinitp2_fail;
930 928 }
931 929 cleanup = ncleanup;
932 930 }
933 931
934 932 /*
935 933 * Initialize the Multicast Group (MCG) entries. First, calculate
936 934 * (and validate) the size of the MCGs.
937 935 */
938 936 status = hermon_rsrc_mcg_entry_get_size(state, &mcg_size_shift);
939 937 if (status != DDI_SUCCESS) {
940 938 hermon_rsrc_fini(state, cleanup);
941 939 status = DDI_FAILURE;
942 940 goto rsrcinitp2_fail;
943 941 }
944 942 mcg_size = HERMON_MCGMEM_SZ(state);
945 943
946 944 /*
947 945 * Initialize the resource pool for the MCG table entries. Notice
948 946 * that the number of MCGs is configurable. Note also that a certain
949 947 * number of MCGs must be set aside for Hermon firmware use (they
950 948 * correspond to the number of MCGs used by the internal hash
951 949 * function).
952 950 */
953 951 num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
954 952 max = ((uint64_t)1 << devlim->log_max_mcg);
955 953 num_prealloc = ((uint64_t)1 << cfgprof->cp_log_num_mcg_hash);
956 954 rsrc_pool = &state->hs_rsrc_hdl[HERMON_MCG];
957 955 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
958 956 rsrc_pool->rsrc_pool_size = (mcg_size * num);
959 957 rsrc_pool->rsrc_shift = mcg_size_shift;
960 958 rsrc_pool->rsrc_quantum = mcg_size;
961 959 rsrc_pool->rsrc_align = (mcg_size * num);
962 960 rsrc_pool->rsrc_state = state;
963 961 HERMON_RSRC_NAME(rsrc_name, HERMON_MCG_VMEM);
964 962 entry_info.hwi_num = num;
965 963 entry_info.hwi_max = max;
966 964 entry_info.hwi_prealloc = num_prealloc;
967 965 entry_info.hwi_rsrcpool = rsrc_pool;
968 966 entry_info.hwi_rsrcname = rsrc_name;
969 967 status = hermon_rsrc_hw_entries_init(state, &entry_info);
970 968 if (status != DDI_SUCCESS) {
971 969 hermon_rsrc_fini(state, cleanup);
972 970 status = DDI_FAILURE;
973 971 goto rsrcinitp2_fail;
974 972 }
975 973 cleanup = HERMON_RSRC_CLEANUP_LEVEL19;
976 974
977 975 /*
978 976 * Initialize the full range of ICM for the AUXC resource.
979 977 * This is done because its size is so small, about 1 byte per QP.
980 978 */
981 979
982 980 /*
983 981 * Initialize the Hermon command handling interfaces. This step
984 982 * sets up the outstanding command tracking mechanism for easy access
985 983 * and fast allocation (see hermon_cmd.c for more details).
986 984 */
987 985 status = hermon_outstanding_cmdlist_init(state);
988 986 if (status != DDI_SUCCESS) {
989 987 hermon_rsrc_fini(state, cleanup);
990 988 status = DDI_FAILURE;
991 989 goto rsrcinitp2_fail;
992 990 }
993 991 cleanup = HERMON_RSRC_CLEANUP_LEVEL20;
994 992
995 993 /* Initialize the resource pool and vmem arena for the PD handles */
996 994 rsrc_pool = &state->hs_rsrc_hdl[HERMON_PDHDL];
997 995 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
998 996 rsrc_pool->rsrc_quantum = sizeof (struct hermon_sw_pd_s);
999 997 rsrc_pool->rsrc_state = state;
1000 998 HERMON_RSRC_NAME(rsrc_name, HERMON_PDHDL_CACHE);
1001 999 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_pd);
1002 1000 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_pd);
1003 1001 hdl_info.swi_rsrcpool = rsrc_pool;
1004 1002 hdl_info.swi_constructor = hermon_rsrc_pdhdl_constructor;
1005 1003 hdl_info.swi_destructor = hermon_rsrc_pdhdl_destructor;
1006 1004 hdl_info.swi_rsrcname = rsrc_name;
1007 1005 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1008 1006 status = hermon_rsrc_pd_handles_init(state, &hdl_info);
1009 1007 if (status != DDI_SUCCESS) {
1010 1008 hermon_rsrc_fini(state, cleanup);
1011 1009 status = DDI_FAILURE;
1012 1010 goto rsrcinitp2_fail;
1013 1011 }
1014 1012 cleanup = HERMON_RSRC_CLEANUP_LEVEL21;
1015 1013
1016 1014 /*
1017 1015 * Initialize the resource pools for the rest of the software handles.
1018 1016 * This includes MR handles, EQ handles, QP handles, etc. These
1019 1017 * objects are almost entirely managed using kmem_cache routines,
1020 1018 * and do not utilize a vmem arena.
1021 1019 */
1022 1020 for (i = HERMON_NUM_ICM_RESOURCES; i < HERMON_NUM_RESOURCES; i++) {
1023 1021 rsrc_pool = &state->hs_rsrc_hdl[i];
1024 1022 rsrc_pool->rsrc_type = i;
1025 1023
1026 1024 /* Set the resource-specific attributes */
1027 1025 switch (i) {
1028 1026 case HERMON_MRHDL:
1029 1027 rsrc_pool->rsrc_quantum =
1030 1028 sizeof (struct hermon_sw_mr_s);
1031 1029 HERMON_RSRC_NAME(rsrc_name, HERMON_MRHDL_CACHE);
1032 1030 hdl_info.swi_num =
1033 1031 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1034 1032 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1035 1033 hdl_info.swi_max =
1036 1034 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1037 1035 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1038 1036 hdl_info.swi_constructor =
1039 1037 hermon_rsrc_mrhdl_constructor;
1040 1038 hdl_info.swi_destructor = hermon_rsrc_mrhdl_destructor;
1041 1039 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1042 1040 ncleanup = HERMON_RSRC_CLEANUP_LEVEL22;
1043 1041 break;
1044 1042
1045 1043 case HERMON_EQHDL:
1046 1044 rsrc_pool->rsrc_quantum =
1047 1045 sizeof (struct hermon_sw_eq_s);
1048 1046 HERMON_RSRC_NAME(rsrc_name, HERMON_EQHDL_CACHE);
1049 1047 hdl_info.swi_num = HERMON_NUM_EQ;
1050 1048 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_eq);
1051 1049 hdl_info.swi_constructor = NULL;
1052 1050 hdl_info.swi_destructor = NULL;
1053 1051 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1054 1052 ncleanup = HERMON_RSRC_CLEANUP_LEVEL23;
1055 1053 break;
1056 1054
1057 1055 case HERMON_CQHDL:
1058 1056 rsrc_pool->rsrc_quantum =
1059 1057 sizeof (struct hermon_sw_cq_s);
1060 1058 HERMON_RSRC_NAME(rsrc_name, HERMON_CQHDL_CACHE);
1061 1059 hdl_info.swi_num =
1062 1060 (uint64_t)1 << cfgprof->cp_log_num_cq;
1063 1061 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_cq;
1064 1062 hdl_info.swi_constructor =
1065 1063 hermon_rsrc_cqhdl_constructor;
1066 1064 hdl_info.swi_destructor = hermon_rsrc_cqhdl_destructor;
1067 1065 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1068 1066 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1069 1067 ncleanup = HERMON_RSRC_CLEANUP_LEVEL24;
1070 1068 break;
1071 1069
1072 1070 case HERMON_SRQHDL:
1073 1071 rsrc_pool->rsrc_quantum =
1074 1072 sizeof (struct hermon_sw_srq_s);
1075 1073 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQHDL_CACHE);
1076 1074 hdl_info.swi_num =
1077 1075 (uint64_t)1 << cfgprof->cp_log_num_srq;
1078 1076 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_srq;
1079 1077 hdl_info.swi_constructor =
1080 1078 hermon_rsrc_srqhdl_constructor;
1081 1079 hdl_info.swi_destructor = hermon_rsrc_srqhdl_destructor;
1082 1080 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1083 1081 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1084 1082 ncleanup = HERMON_RSRC_CLEANUP_LEVEL25;
1085 1083 break;
1086 1084
1087 1085 case HERMON_AHHDL:
1088 1086 rsrc_pool->rsrc_quantum =
1089 1087 sizeof (struct hermon_sw_ah_s);
1090 1088 HERMON_RSRC_NAME(rsrc_name, HERMON_AHHDL_CACHE);
1091 1089 hdl_info.swi_num =
1092 1090 (uint64_t)1 << cfgprof->cp_log_num_ah;
1093 1091 hdl_info.swi_max = HERMON_NUM_AH;
1094 1092 hdl_info.swi_constructor =
1095 1093 hermon_rsrc_ahhdl_constructor;
1096 1094 hdl_info.swi_destructor = hermon_rsrc_ahhdl_destructor;
1097 1095 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1098 1096 ncleanup = HERMON_RSRC_CLEANUP_LEVEL26;
1099 1097 break;
1100 1098
1101 1099 case HERMON_QPHDL:
1102 1100 rsrc_pool->rsrc_quantum =
1103 1101 sizeof (struct hermon_sw_qp_s);
1104 1102 HERMON_RSRC_NAME(rsrc_name, HERMON_QPHDL_CACHE);
1105 1103 hdl_info.swi_num =
1106 1104 (uint64_t)1 << cfgprof->cp_log_num_qp;
1107 1105 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_qp;
1108 1106 hdl_info.swi_constructor =
1109 1107 hermon_rsrc_qphdl_constructor;
1110 1108 hdl_info.swi_destructor = hermon_rsrc_qphdl_destructor;
1111 1109 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1112 1110 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1113 1111 ncleanup = HERMON_RSRC_CLEANUP_LEVEL27;
1114 1112 break;
1115 1113
1116 1114 case HERMON_REFCNT:
1117 1115 rsrc_pool->rsrc_quantum = sizeof (hermon_sw_refcnt_t);
1118 1116 HERMON_RSRC_NAME(rsrc_name, HERMON_REFCNT_CACHE);
1119 1117 hdl_info.swi_num =
1120 1118 (uint64_t)1 << cfgprof->cp_log_num_dmpt;
1121 1119 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_dmpt;
1122 1120 hdl_info.swi_constructor =
1123 1121 hermon_rsrc_refcnt_constructor;
1124 1122 hdl_info.swi_destructor = hermon_rsrc_refcnt_destructor;
1125 1123 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1126 1124 ncleanup = HERMON_RSRC_CLEANUP_LEVEL28;
1127 1125 break;
1128 1126
1129 1127 default:
1130 1128 continue;
1131 1129 }
1132 1130
1133 1131 /* Set the common values and call the init routine */
1134 1132 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
1135 1133 rsrc_pool->rsrc_state = state;
1136 1134 hdl_info.swi_rsrcpool = rsrc_pool;
1137 1135 hdl_info.swi_rsrcname = rsrc_name;
1138 1136 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1139 1137 if (status != DDI_SUCCESS) {
1140 1138 hermon_rsrc_fini(state, cleanup);
1141 1139 status = DDI_FAILURE;
1142 1140 goto rsrcinitp2_fail;
1143 1141 }
1144 1142 cleanup = ncleanup;
1145 1143 }
1146 1144
1147 1145 /*
1148 1146 * Initialize a resource pool for the MCG handles. Notice that for
1149 1147 * these MCG handles, we are allocating a table of structures (used to
1150 1148 * keep track of the MCG entries that are being written to hardware
1151 1149 * and to speed up multicast attach/detach operations).
1152 1150 */
1153 1151 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1154 1152 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_mcg);
1155 1153 hdl_info.swi_flags = HERMON_SWHDL_TABLE_INIT;
1156 1154 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1157 1155 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1158 1156 if (status != DDI_SUCCESS) {
1159 1157 hermon_rsrc_fini(state, cleanup);
1160 1158 status = DDI_FAILURE;
1161 1159 goto rsrcinitp2_fail;
1162 1160 }
1163 1161 state->hs_mcghdl = hdl_info.swi_table_ptr;
1164 1162 cleanup = HERMON_RSRC_CLEANUP_LEVEL29;
1165 1163
1166 1164 /*
1167 1165 * Last, initialize the resource pool for the UAR pages, which contain
1168 1166 * the hardware's doorbell registers. Each process supported in User
1169 1167 * Mode is assigned a UAR page. Also coming from this pool are the
1170 1168 * kernel-assigned UAR page, and any hardware-reserved pages. Note
1171 1169 * that the number of UAR pages is configurable, the value must be less
1172 1170 * than the maximum value (obtained from the QUERY_DEV_LIM command) or
1173 1171 * the initialization will fail. Note also that we assign the base
1174 1172 * address of the UAR BAR to the rsrc_start parameter.
1175 1173 */
1176 1174 num = ((uint64_t)1 << cfgprof->cp_log_num_uar);
1177 1175 max = num;
1178 1176 num_prealloc = max(devlim->num_rsvd_uar, 128);
1179 1177 rsrc_pool = &state->hs_rsrc_hdl[HERMON_UARPG];
1180 1178 rsrc_pool->rsrc_loc = HERMON_IN_UAR;
1181 1179 rsrc_pool->rsrc_pool_size = (num << PAGESHIFT);
1182 1180 rsrc_pool->rsrc_shift = PAGESHIFT;
1183 1181 rsrc_pool->rsrc_quantum = (uint_t)PAGESIZE;
1184 1182 rsrc_pool->rsrc_align = PAGESIZE;
1185 1183 rsrc_pool->rsrc_state = state;
1186 1184 rsrc_pool->rsrc_start = (void *)state->hs_reg_uar_baseaddr;
1187 1185 HERMON_RSRC_NAME(rsrc_name, HERMON_UAR_PAGE_VMEM_ATTCH);
1188 1186 entry_info.hwi_num = num;
1189 1187 entry_info.hwi_max = max;
1190 1188 entry_info.hwi_prealloc = num_prealloc;
1191 1189 entry_info.hwi_rsrcpool = rsrc_pool;
1192 1190 entry_info.hwi_rsrcname = rsrc_name;
1193 1191 status = hermon_rsrc_hw_entries_init(state, &entry_info);
1194 1192 if (status != DDI_SUCCESS) {
1195 1193 hermon_rsrc_fini(state, cleanup);
1196 1194 status = DDI_FAILURE;
1197 1195 goto rsrcinitp2_fail;
1198 1196 }
1199 1197
1200 1198 cleanup = HERMON_RSRC_CLEANUP_ALL;
1201 1199
1202 1200 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1203 1201 return (DDI_SUCCESS);
1204 1202
1205 1203 rsrcinitp2_fail:
1206 1204 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1207 1205 return (status);
1208 1206 }
1209 1207
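To make the ICM pool attributes set above concrete, consider a hypothetical ICM object type with object_size = 32 bytes (log_object_size = 5) and 65536 entries, so that its table_size is 2 MB: the pool would then carry rsrc_quantum = 32, rsrc_shift = 5, and rsrc_pool_size = rsrc_align = 2 MB, meaning the whole context table is naturally aligned within the ICM address space.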
1210 1208
1211 1209 /*
1212 1210 * hermon_rsrc_fini()
1213 1211 * Context: Only called from attach() and/or detach() path contexts
1214 1212 */
1215 1213 void
1216 1214 hermon_rsrc_fini(hermon_state_t *state, hermon_rsrc_cleanup_level_t clean)
1217 1215 {
1218 1216 hermon_rsrc_sw_hdl_info_t hdl_info;
1219 1217 hermon_rsrc_hw_entry_info_t entry_info;
1220 1218 hermon_rsrc_mbox_info_t mbox_info;
1221 1219 hermon_cfg_profile_t *cfgprof;
1222 1220
1223 1221 ASSERT(state != NULL);
1224 1222
1225 1223 cfgprof = state->hs_cfg_profile;
1226 1224
1227 1225 /*
1228 1226 * If init code above is shortened up (see comments), then we
1229 1227 * need to establish how to safely and simply clean up from any
1230 1228 * given failure point. Flags, maybe...
1231 1229 */
1232 1230
1233 1231 switch (clean) {
1234 1232 /*
1235 1233 * If we add more resources that need to be cleaned up here, we should
1236 1234 * ensure that HERMON_RSRC_CLEANUP_ALL is still the first entry (i.e.
1237 1235 * corresponds to the last resource allocated).
1238 1236 */
1239 1237
1240 1238 case HERMON_RSRC_CLEANUP_ALL:
1241 1239 case HERMON_RSRC_CLEANUP_LEVEL31:
1242 1240 /* Cleanup the UAR page resource pool, first the dbr pages */
1243 1241 if (state->hs_kern_dbr) {
1244 1242 hermon_dbr_kern_free(state);
1245 1243 state->hs_kern_dbr = NULL;
1246 1244 }
1247 1245
1248 1246 /* Then, the pool itself */
1249 1247 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_UARPG];
1250 1248 hermon_rsrc_hw_entries_fini(state, &entry_info);
1251 1249
1252 1250 /* FALLTHROUGH */
1253 1251
1254 1252 case HERMON_RSRC_CLEANUP_LEVEL30:
1255 1253 /* Cleanup the central MCG handle pointers list */
1256 1254 hdl_info.swi_rsrcpool = NULL;
1257 1255 hdl_info.swi_table_ptr = state->hs_mcghdl;
1258 1256 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1259 1257 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1260 1258 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1261 1259 /* FALLTHROUGH */
1262 1260
1263 1261 case HERMON_RSRC_CLEANUP_LEVEL29:
1264 1262 /* Cleanup the reference count resource pool */
1265 1263 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_REFCNT];
1266 1264 hdl_info.swi_table_ptr = NULL;
1267 1265 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1268 1266 /* FALLTHROUGH */
1269 1267
1270 1268 case HERMON_RSRC_CLEANUP_LEVEL28:
1271 1269 /* Cleanup the QP handle resource pool */
1272 1270 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPHDL];
1273 1271 hdl_info.swi_table_ptr = NULL;
1274 1272 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_qp);
1275 1273 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1276 1274 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1277 1275 /* FALLTHROUGH */
1278 1276 case HERMON_RSRC_CLEANUP_LEVEL27:
1279 1277 /* Cleanup the address handle resource pool */
1280 1278 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AHHDL];
1281 1279 hdl_info.swi_table_ptr = NULL;
1282 1280 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1283 1281 /* FALLTHROUGH */
1284 1282
1285 1283 case HERMON_RSRC_CLEANUP_LEVEL26:
1286 1284 /* Cleanup the SRQ handle resource pool. */
1287 1285 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQHDL];
1288 1286 hdl_info.swi_table_ptr = NULL;
1289 1287 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_srq);
1290 1288 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1291 1289 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1292 1290 /* FALLTHROUGH */
1293 1291
1294 1292 case HERMON_RSRC_CLEANUP_LEVEL25:
1295 1293 /* Cleanup the CQ handle resource pool */
1296 1294 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQHDL];
1297 1295 hdl_info.swi_table_ptr = NULL;
1298 1296 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_cq);
1299 1297 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1300 1298 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1301 1299 /* FALLTHROUGH */
1302 1300
1303 1301 case HERMON_RSRC_CLEANUP_LEVEL24:
1304 1302 /* Cleanup the EQ handle resource pool */
1305 1303 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQHDL];
1306 1304 hdl_info.swi_table_ptr = NULL;
1307 1305 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1308 1306 /* FALLTHROUGH */
1309 1307
1310 1308 case HERMON_RSRC_CLEANUP_LEVEL23:
1311 1309 /* Cleanup the MR handle resource pool */
1312 1310 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MRHDL];
1313 1311 hdl_info.swi_table_ptr = NULL;
1314 1312 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1315 1313 /* FALLTHROUGH */
1316 1314
1317 1315 case HERMON_RSRC_CLEANUP_LEVEL22:
1318 1316 /* Cleanup the PD handle resource pool */
1319 1317 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_PDHDL];
1320 1318 hdl_info.swi_table_ptr = NULL;
1321 1319 hermon_rsrc_pd_handles_fini(state, &hdl_info);
1322 1320 /* FALLTHROUGH */
1323 1321
1324 1322 case HERMON_RSRC_CLEANUP_LEVEL21:
1325 1323 /* Currently unused - FALLTHROUGH */
1326 1324
1327 1325 case HERMON_RSRC_CLEANUP_LEVEL20:
1328 1326 /* Cleanup the outstanding command list */
1329 1327 hermon_outstanding_cmdlist_fini(state);
1330 1328 /* FALLTHROUGH */
1331 1329
1332 1330 case HERMON_RSRC_CLEANUP_LEVEL19:
1333 1331 /* Cleanup the EQC table resource pool */
1334 1332 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQC];
1335 1333 hermon_rsrc_hw_entries_fini(state, &entry_info);
1336 1334 /* FALLTHROUGH */
1337 1335
1338 1336 case HERMON_RSRC_CLEANUP_LEVEL18:
1339 1337 /* Cleanup the MCG table resource pool */
1340 1338 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MCG];
1341 1339 hermon_rsrc_hw_entries_fini(state, &entry_info);
1342 1340 /* FALLTHROUGH */
1343 1341
1344 1342 case HERMON_RSRC_CLEANUP_LEVEL17:
1345 1343 /* Currently Unused - fallthrough */
1346 1344 case HERMON_RSRC_CLEANUP_LEVEL16:
1347 1345 /* Cleanup the SRQC table resource pool */
1348 1346 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQC];
1349 1347 hermon_rsrc_hw_entries_fini(state, &entry_info);
1350 1348 /* FALLTHROUGH */
1351 1349
1352 1350 case HERMON_RSRC_CLEANUP_LEVEL15:
1353 1351 /* Cleanup the AUXC table resource pool */
1354 1352 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AUXC];
1355 1353 hermon_rsrc_hw_entries_fini(state, &entry_info);
1356 1354 /* FALLTHROUGH */
1357 1355
1358 1356 case HERMON_RSRC_CLEANUP_LEVEL14:
1359 1357 /* Cleanup the ALTCF table resource pool */
1360 1358 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_ALTC];
1361 1359 hermon_rsrc_hw_entries_fini(state, &entry_info);
1362 1360 /* FALLTHROUGH */
1363 1361
1364 1362 case HERMON_RSRC_CLEANUP_LEVEL13:
1365 1363 /* Cleanup the CQC table resource pool */
1366 1364 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQC];
1367 1365 hermon_rsrc_hw_entries_fini(state, &entry_info);
1368 1366 /* FALLTHROUGH */
1369 1367
1370 1368 case HERMON_RSRC_CLEANUP_LEVEL12:
1371 1369 /* Cleanup the RDB table resource pool */
1372 1370 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_RDB];
1373 1371 hermon_rsrc_hw_entries_fini(state, &entry_info);
1374 1372 /* FALLTHROUGH */
1375 1373
1376 1374 case HERMON_RSRC_CLEANUP_LEVEL11:
1377 1375 /* Cleanup the QPC table resource pool */
1378 1376 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
1379 1377 hermon_rsrc_hw_entries_fini(state, &entry_info);
1380 1378 /* FALLTHROUGH */
1381 1379
1382 1380 case HERMON_RSRC_CLEANUP_LEVEL10EQ:
1383 1381 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1384 1382 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_EQC];
1385 1383 hermon_rsrc_hw_entries_fini(state, &entry_info);
1386 1384 /* FALLTHROUGH */
1387 1385
1388 1386 case HERMON_RSRC_CLEANUP_LEVEL10CQ:
1389 1387 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1390 1388 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_CQC];
1391 1389 hermon_rsrc_hw_entries_fini(state, &entry_info);
1392 1390 /* FALLTHROUGH */
1393 1391
1394 1392 case HERMON_RSRC_CLEANUP_LEVEL10SRQ:
1395 1393 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1396 1394 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_SRQC];
1397 1395 hermon_rsrc_hw_entries_fini(state, &entry_info);
1398 1396 /* FALLTHROUGH */
1399 1397
1400 1398 case HERMON_RSRC_CLEANUP_LEVEL10QP:
1401 1399 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1402 1400 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_QPC];
1403 1401 hermon_rsrc_hw_entries_fini(state, &entry_info);
1404 1402 /* FALLTHROUGH */
1405 1403
1406 1404 case HERMON_RSRC_CLEANUP_LEVEL10:
1407 1405 /* Cleanup the dMPT table resource pool */
1408 1406 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_DMPT];
1409 1407 hermon_rsrc_hw_entries_fini(state, &entry_info);
1410 1408 /* FALLTHROUGH */
1411 1409
1412 1410 case HERMON_RSRC_CLEANUP_LEVEL9:
1413 1411 /* Cleanup the MTT table resource pool */
1414 1412 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MTT];
1415 1413 hermon_rsrc_hw_entries_fini(state, &entry_info);
1416 1414 break;
1417 1415
1418 1416 /*
1419 1417 * The cleanup below comes from the "Phase 1" initialization step.
1420 1418 * (see hermon_rsrc_init_phase1() above)
1421 1419 */
1422 1420 case HERMON_RSRC_CLEANUP_PHASE1_COMPLETE:
1423 1421 		/* Cleanup the interrupt "In" mailbox list */
1424 1422 hermon_intr_inmbox_list_fini(state);
1425 1423 /* FALLTHROUGH */
1426 1424
1427 1425 case HERMON_RSRC_CLEANUP_LEVEL7:
1428 1426 /* Cleanup the interrupt "In" mailbox resource pool */
1429 1427 mbox_info.mbi_rsrcpool =
1430 1428 &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
1431 1429 hermon_rsrc_mbox_fini(state, &mbox_info);
1432 1430 /* FALLTHROUGH */
1433 1431
1434 1432 case HERMON_RSRC_CLEANUP_LEVEL6:
1435 1433 /* Cleanup the "In" mailbox list */
1436 1434 hermon_inmbox_list_fini(state);
1437 1435 /* FALLTHROUGH */
1438 1436
1439 1437 case HERMON_RSRC_CLEANUP_LEVEL5:
1440 1438 /* Cleanup the "In" mailbox resource pool */
1441 1439 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
1442 1440 hermon_rsrc_mbox_fini(state, &mbox_info);
1443 1441 /* FALLTHROUGH */
1444 1442
1445 1443 case HERMON_RSRC_CLEANUP_LEVEL4:
1446 1444 /* Cleanup the interrupt "Out" mailbox list */
1447 1445 hermon_intr_outmbox_list_fini(state);
1448 1446 /* FALLTHROUGH */
1449 1447
1450 1448 case HERMON_RSRC_CLEANUP_LEVEL3:
1451 1449 		/* Cleanup the interrupt "Out" mailbox resource pool */
1452 1450 mbox_info.mbi_rsrcpool =
1453 1451 &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
1454 1452 hermon_rsrc_mbox_fini(state, &mbox_info);
1455 1453 /* FALLTHROUGH */
1456 1454
1457 1455 case HERMON_RSRC_CLEANUP_LEVEL2:
1458 1456 /* Cleanup the "Out" mailbox list */
1459 1457 hermon_outmbox_list_fini(state);
1460 1458 /* FALLTHROUGH */
1461 1459
1462 1460 case HERMON_RSRC_CLEANUP_LEVEL1:
1463 1461 /* Cleanup the "Out" mailbox resource pool */
1464 1462 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
1465 1463 hermon_rsrc_mbox_fini(state, &mbox_info);
1466 1464 /* FALLTHROUGH */
1467 1465
1468 1466 case HERMON_RSRC_CLEANUP_LEVEL0:
1469 1467 /* Free the array of hermon_rsrc_pool_info_t's */
1470 1468
1471 1469 kmem_free(state->hs_rsrc_hdl, HERMON_NUM_RESOURCES *
1472 1470 sizeof (hermon_rsrc_pool_info_t));
1473 1471
1474 1472 kmem_cache_destroy(state->hs_rsrc_cache);
1475 1473 break;
1476 1474
1477 1475 default:
1478 1476 HERMON_WARNING(state, "unexpected resource cleanup level");
1479 1477 break;
1480 1478 }
1481 1479 }
1482 1480
1483 1481
1484 1482 /*
1485 1483 * hermon_rsrc_mbox_init()
1486 1484 * Context: Only called from attach() path context
1487 1485 */
1488 1486 static int
1489 1487 hermon_rsrc_mbox_init(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1490 1488 {
1491 1489 hermon_rsrc_pool_info_t *rsrc_pool;
1492 1490 hermon_rsrc_priv_mbox_t *priv;
1493 1491
1494 1492 ASSERT(state != NULL);
1495 1493 ASSERT(info != NULL);
1496 1494
1497 1495 rsrc_pool = info->mbi_rsrcpool;
1498 1496 ASSERT(rsrc_pool != NULL);
1499 1497
1500 1498 /* Allocate and initialize mailbox private structure */
1501 1499 priv = kmem_zalloc(sizeof (hermon_rsrc_priv_mbox_t), KM_SLEEP);
1502 1500 priv->pmb_dip = state->hs_dip;
1503 1501 priv->pmb_devaccattr = state->hs_reg_accattr;
1504 1502 priv->pmb_xfer_mode = DDI_DMA_CONSISTENT;
1505 1503
1506 1504 /*
1507 1505 * Initialize many of the default DMA attributes. Then set alignment
1508 1506 * and scatter-gather restrictions specific for mailbox memory.
1509 1507 */
1510 1508 hermon_dma_attr_init(state, &priv->pmb_dmaattr);
1511 1509 priv->pmb_dmaattr.dma_attr_align = HERMON_MBOX_ALIGN;
1512 1510 priv->pmb_dmaattr.dma_attr_sgllen = 1;
1513 1511 priv->pmb_dmaattr.dma_attr_flags = 0;
1514 1512 rsrc_pool->rsrc_private = priv;
1515 1513
1516 1514 ASSERT(rsrc_pool->rsrc_loc == HERMON_IN_SYSMEM);
1517 1515
1518 1516 rsrc_pool->rsrc_start = NULL;
1519 1517 rsrc_pool->rsrc_vmp = NULL;
1520 1518
1521 1519 return (DDI_SUCCESS);
1522 1520 }
1523 1521
1524 1522
1525 1523 /*
1526 1524 * hermon_rsrc_mbox_fini()
1527 1525 * Context: Only called from attach() and/or detach() path contexts
1528 1526 */
1529 1527 /* ARGSUSED */
1530 1528 static void
1531 1529 hermon_rsrc_mbox_fini(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1532 1530 {
1533 1531 hermon_rsrc_pool_info_t *rsrc_pool;
1534 1532
1535 1533 ASSERT(state != NULL);
1536 1534 ASSERT(info != NULL);
1537 1535
1538 1536 rsrc_pool = info->mbi_rsrcpool;
1539 1537 ASSERT(rsrc_pool != NULL);
1540 1538
1541 1539 /* Free up the private struct */
1542 1540 kmem_free(rsrc_pool->rsrc_private, sizeof (hermon_rsrc_priv_mbox_t));
1543 1541 }
1544 1542
1545 1543
1546 1544 /*
1547 1545 * hermon_rsrc_hw_entries_init()
1548 1546 * Context: Only called from attach() path context
1549 1547 */
1550 1548 int
1551 1549 hermon_rsrc_hw_entries_init(hermon_state_t *state,
1552 1550 hermon_rsrc_hw_entry_info_t *info)
1553 1551 {
1554 1552 hermon_rsrc_pool_info_t *rsrc_pool;
1555 1553 hermon_rsrc_t *rsvd_rsrc = NULL;
1556 1554 vmem_t *vmp;
1557 1555 uint64_t num_hwentry, max_hwentry, num_prealloc;
1558 1556 int status;
1559 1557
1560 1558 ASSERT(state != NULL);
1561 1559 ASSERT(info != NULL);
1562 1560
1563 1561 rsrc_pool = info->hwi_rsrcpool;
1564 1562 ASSERT(rsrc_pool != NULL);
1565 1563 num_hwentry = info->hwi_num;
1566 1564 max_hwentry = info->hwi_max;
1567 1565 num_prealloc = info->hwi_prealloc;
1568 1566
1569 1567 if (hermon_rsrc_verbose) {
1570 1568 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init: "
1571 1569 "rsrc_type (0x%x) num (%llx) max (0x%llx) prealloc "
1572 1570 "(0x%llx)", rsrc_pool->rsrc_type, (longlong_t)num_hwentry,
1573 1571 (longlong_t)max_hwentry, (longlong_t)num_prealloc);
1574 1572 }
1575 1573
1576 1574 /* Make sure number of HW entries makes sense */
1577 1575 if (num_hwentry > max_hwentry) {
1578 1576 return (DDI_FAILURE);
1579 1577 }
1580 1578
1581 1579 /* Set this pool's rsrc_start from the initial ICM allocation */
1582 1580 if (rsrc_pool->rsrc_start == 0) {
1583 1581
1584 1582 /* use a ROUND value that works on both 32 and 64-bit kernels */
1585 1583 rsrc_pool->rsrc_start = (void *)(uintptr_t)0x10000000;
1586 1584
1587 1585 if (hermon_rsrc_verbose) {
1588 1586 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1589 1587 " rsrc_type (0x%x) rsrc_start set (0x%lx)",
1590 1588 rsrc_pool->rsrc_type, rsrc_pool->rsrc_start);
1591 1589 }
1592 1590 }
1593 1591
1594 1592 /*
1595 1593 * Create new vmem arena for the HW entries table if rsrc_quantum
1596 1594 * is non-zero. Otherwise if rsrc_quantum is zero, then these HW
1597 1595 * entries are not going to be dynamically allocatable (i.e. they
1598 1596 * won't be allocated/freed through hermon_rsrc_alloc/free). This
1599 1597 * latter option is used for both ALTC and CMPT resources which
1600 1598 * are managed by hardware.
1601 1599 */
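	/*
	 * Illustrative example (values are hypothetical, not taken from the
	 * configuration profile): a pool with rsrc_quantum = 32 and
	 * rsrc_pool_size = 0x100000 gets a vmem arena of 32768 32-byte
	 * entries starting at rsrc_start, while a pool with rsrc_quantum = 0
	 * (e.g. ALTC or the cMPTs) is never handed to hermon_rsrc_alloc()/
	 * hermon_rsrc_free() and so needs no arena at all.
	 */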
1602 1600 if (rsrc_pool->rsrc_quantum != 0) {
1603 1601 vmp = vmem_create(info->hwi_rsrcname,
1604 1602 (void *)(uintptr_t)rsrc_pool->rsrc_start,
1605 1603 rsrc_pool->rsrc_pool_size, rsrc_pool->rsrc_quantum,
1606 1604 NULL, NULL, NULL, 0, VM_SLEEP);
1607 1605 if (vmp == NULL) {
1608 1606 /* failed to create vmem arena */
1609 1607 return (DDI_FAILURE);
1610 1608 }
1611 1609 rsrc_pool->rsrc_vmp = vmp;
1612 1610 if (hermon_rsrc_verbose) {
1613 1611 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1614 1612 " rsrc_type (0x%x) created vmem arena for rsrc",
1615 1613 rsrc_pool->rsrc_type);
1616 1614 }
1617 1615 } else {
1618 1616 /* we do not require a vmem arena */
1619 1617 rsrc_pool->rsrc_vmp = NULL;
1620 1618 if (hermon_rsrc_verbose) {
1621 1619 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1622 1620 " rsrc_type (0x%x) vmem arena not required",
1623 1621 rsrc_pool->rsrc_type);
1624 1622 }
1625 1623 }
1626 1624
1627 1625 /* Allocate hardware reserved resources, if any */
1628 1626 if (num_prealloc != 0) {
1629 1627 status = hermon_rsrc_alloc(state, rsrc_pool->rsrc_type,
1630 1628 num_prealloc, HERMON_SLEEP, &rsvd_rsrc);
1631 1629 if (status != DDI_SUCCESS) {
1632 1630 /* unable to preallocate the reserved entries */
1633 1631 if (rsrc_pool->rsrc_vmp != NULL) {
1634 1632 vmem_destroy(rsrc_pool->rsrc_vmp);
1635 1633 }
1636 1634 return (DDI_FAILURE);
1637 1635 }
1638 1636 }
1639 1637 rsrc_pool->rsrc_private = rsvd_rsrc;
1640 1638
1641 1639 return (DDI_SUCCESS);
1642 1640 }
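/*
 * Illustrative usage (a sketch, not copied from the attach path): a caller
 * would fill in a hermon_rsrc_hw_entry_info_t for one ICM-backed table and
 * then call the init routine above, roughly as follows:
 *
 *	entry_info.hwi_num      = <entries from the config profile>;
 *	entry_info.hwi_max      = <device limit for this table>;
 *	entry_info.hwi_prealloc = <entries reserved by hardware>;
 *	entry_info.hwi_rsrcname = <arena name>;
 *	entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
 *	status = hermon_rsrc_hw_entries_init(state, &entry_info);
 *
 * The matching hermon_rsrc_hw_entries_fini() calls appear in the cleanup
 * switch above.
 */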
1643 1641
1644 1642
1645 1643 /*
1646 1644 * hermon_rsrc_hw_entries_fini()
1647 1645 * Context: Only called from attach() and/or detach() path contexts
1648 1646 */
1649 1647 void
1650 1648 hermon_rsrc_hw_entries_fini(hermon_state_t *state,
1651 1649 hermon_rsrc_hw_entry_info_t *info)
1652 1650 {
1653 1651 hermon_rsrc_pool_info_t *rsrc_pool;
1654 1652 hermon_rsrc_t *rsvd_rsrc;
1655 1653
1656 1654 ASSERT(state != NULL);
1657 1655 ASSERT(info != NULL);
1658 1656
1659 1657 rsrc_pool = info->hwi_rsrcpool;
1660 1658 ASSERT(rsrc_pool != NULL);
1661 1659
1662 1660 /* Free up any "reserved" (i.e. preallocated) HW entries */
1663 1661 rsvd_rsrc = (hermon_rsrc_t *)rsrc_pool->rsrc_private;
1664 1662 if (rsvd_rsrc != NULL) {
1665 1663 hermon_rsrc_free(state, &rsvd_rsrc);
1666 1664 }
1667 1665
1668 1666 /*
1669 1667 * If we've actually setup a vmem arena for the HW entries, then
1670 1668 * destroy it now
1671 1669 */
1672 1670 if (rsrc_pool->rsrc_vmp != NULL) {
1673 1671 vmem_destroy(rsrc_pool->rsrc_vmp);
1674 1672 }
1675 1673 }
1676 1674
1677 1675
1678 1676 /*
1679 1677 * hermon_rsrc_sw_handles_init()
1680 1678 * Context: Only called from attach() path context
1681 1679 */
1682 1680 /* ARGSUSED */
1683 1681 static int
1684 1682 hermon_rsrc_sw_handles_init(hermon_state_t *state,
1685 1683 hermon_rsrc_sw_hdl_info_t *info)
1686 1684 {
1687 1685 hermon_rsrc_pool_info_t *rsrc_pool;
1688 1686 uint64_t num_swhdl, max_swhdl, prealloc_sz;
1689 1687
1690 1688 ASSERT(state != NULL);
1691 1689 ASSERT(info != NULL);
1692 1690
1693 1691 rsrc_pool = info->swi_rsrcpool;
1694 1692 ASSERT(rsrc_pool != NULL);
1695 1693 num_swhdl = info->swi_num;
1696 1694 max_swhdl = info->swi_max;
1697 1695 prealloc_sz = info->swi_prealloc_sz;
1698 1696
1699 1697
1700 1698 /* Make sure number of SW handles makes sense */
1701 1699 if (num_swhdl > max_swhdl) {
1702 1700 return (DDI_FAILURE);
1703 1701 }
1704 1702
1705 1703 /*
1706 1704 * Depending on the flags parameter, create a kmem_cache for some
1707 1705 * number of software handle structures. Note: kmem_cache_create()
1708 1706 * will SLEEP until successful.
1709 1707 */
1710 1708 if (info->swi_flags & HERMON_SWHDL_KMEMCACHE_INIT) {
1711 1709 rsrc_pool->rsrc_private = kmem_cache_create(
1712 1710 info->swi_rsrcname, rsrc_pool->rsrc_quantum, 0,
1713 1711 info->swi_constructor, info->swi_destructor, NULL,
1714 1712 rsrc_pool->rsrc_state, NULL, 0);
1715 1713 }
1716 1714
1717 1715
1718 1716 /* Allocate the central list of SW handle pointers */
1719 1717 if (info->swi_flags & HERMON_SWHDL_TABLE_INIT) {
1720 1718 info->swi_table_ptr = kmem_zalloc(num_swhdl * prealloc_sz,
1721 1719 KM_SLEEP);
1722 1720 }
1723 1721
1724 1722 return (DDI_SUCCESS);
1725 1723 }
1726 1724
1727 1725
1728 1726 /*
1729 1727 * hermon_rsrc_sw_handles_fini()
1730 1728 * Context: Only called from attach() and/or detach() path contexts
1731 1729 */
1732 1730 /* ARGSUSED */
1733 1731 static void
1734 1732 hermon_rsrc_sw_handles_fini(hermon_state_t *state,
1735 1733 hermon_rsrc_sw_hdl_info_t *info)
1736 1734 {
1737 1735 hermon_rsrc_pool_info_t *rsrc_pool;
1738 1736 uint64_t num_swhdl, prealloc_sz;
1739 1737
1740 1738 ASSERT(state != NULL);
1741 1739 ASSERT(info != NULL);
1742 1740
1743 1741 rsrc_pool = info->swi_rsrcpool;
1744 1742 num_swhdl = info->swi_num;
1745 1743 prealloc_sz = info->swi_prealloc_sz;
1746 1744
1747 1745 /*
1748 1746 * If a "software handle" kmem_cache exists for this resource, then
1749 1747 * destroy it now
1750 1748 */
1751 1749 if (rsrc_pool != NULL) {
1752 1750 kmem_cache_destroy(rsrc_pool->rsrc_private);
1753 1751 }
1754 1752
1755 1753 /* Free up this central list of SW handle pointers */
1756 1754 if (info->swi_table_ptr != NULL) {
1757 1755 kmem_free(info->swi_table_ptr, num_swhdl * prealloc_sz);
1758 1756 }
1759 1757 }
1760 1758
1761 1759
1762 1760 /*
1763 1761 * hermon_rsrc_pd_handles_init()
1764 1762 * Context: Only called from attach() path context
1765 1763 */
1766 1764 static int
1767 1765 hermon_rsrc_pd_handles_init(hermon_state_t *state,
1768 1766 hermon_rsrc_sw_hdl_info_t *info)
1769 1767 {
1770 1768 hermon_rsrc_pool_info_t *rsrc_pool;
1771 1769 vmem_t *vmp;
1772 1770 char vmem_name[HERMON_RSRC_NAME_MAXLEN];
1773 1771 int status;
1774 1772
1775 1773 ASSERT(state != NULL);
1776 1774 ASSERT(info != NULL);
1777 1775
1778 1776 rsrc_pool = info->swi_rsrcpool;
1779 1777 ASSERT(rsrc_pool != NULL);
1780 1778
1781 1779 /* Initialize the resource pool for software handle table */
1782 1780 status = hermon_rsrc_sw_handles_init(state, info);
1783 1781 if (status != DDI_SUCCESS) {
1784 1782 return (DDI_FAILURE);
1785 1783 }
1786 1784
1787 1785 /* Build vmem arena name from Hermon instance */
1788 1786 HERMON_RSRC_NAME(vmem_name, HERMON_PDHDL_VMEM);
1789 1787
1790 1788 /* Create new vmem arena for PD numbers */
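	/*
	 * VMC_IDENTIFIER makes this an identifier arena: vmem_alloc()
	 * hands back small integers rather than addresses, and those
	 * integers are used directly as PD numbers (see
	 * hermon_rsrc_pdhdl_alloc() below). The base of 1 presumably
	 * keeps PD number 0 from ever being handed out.
	 */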
1791 1789 vmp = vmem_create(vmem_name, (caddr_t)1, info->swi_num, 1, NULL,
1792 1790 NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1793 1791 if (vmp == NULL) {
1794 1792 /* Unable to create vmem arena */
1795 1793 info->swi_table_ptr = NULL;
1796 1794 hermon_rsrc_sw_handles_fini(state, info);
1797 1795 return (DDI_FAILURE);
1798 1796 }
1799 1797 rsrc_pool->rsrc_vmp = vmp;
1800 1798
1801 1799 return (DDI_SUCCESS);
1802 1800 }
1803 1801
1804 1802
1805 1803 /*
1806 1804 * hermon_rsrc_pd_handles_fini()
1807 1805 * Context: Only called from attach() and/or detach() path contexts
1808 1806 */
1809 1807 static void
1810 1808 hermon_rsrc_pd_handles_fini(hermon_state_t *state,
1811 1809 hermon_rsrc_sw_hdl_info_t *info)
1812 1810 {
1813 1811 hermon_rsrc_pool_info_t *rsrc_pool;
1814 1812
1815 1813 ASSERT(state != NULL);
1816 1814 ASSERT(info != NULL);
1817 1815
1818 1816 rsrc_pool = info->swi_rsrcpool;
1819 1817
1820 1818 	/* Destroy the specially created PD number vmem arena */
1821 1819 vmem_destroy(rsrc_pool->rsrc_vmp);
1822 1820
1823 1821 /* Destroy the "hermon_sw_pd_t" kmem_cache */
1824 1822 hermon_rsrc_sw_handles_fini(state, info);
1825 1823 }
1826 1824
1827 1825
1828 1826 /*
1829 1827 * hermon_rsrc_mbox_alloc()
1830 1828 * Context: Only called from attach() path context
1831 1829 */
1832 1830 static int
1833 1831 hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1834 1832 hermon_rsrc_t *hdl)
1835 1833 {
1836 1834 hermon_rsrc_priv_mbox_t *priv;
1837 1835 caddr_t kaddr;
1838 1836 size_t real_len, temp_len;
1839 1837 int status;
1840 1838
1841 1839 ASSERT(pool_info != NULL);
1842 1840 ASSERT(hdl != NULL);
1843 1841
1844 1842 /* Get the private pointer for the mailboxes */
1845 1843 priv = pool_info->rsrc_private;
1846 1844 ASSERT(priv != NULL);
1847 1845
1848 1846 /* Allocate a DMA handle for the mailbox */
1849 1847 status = ddi_dma_alloc_handle(priv->pmb_dip, &priv->pmb_dmaattr,
1850 1848 DDI_DMA_SLEEP, NULL, &hdl->hr_dmahdl);
1851 1849 if (status != DDI_SUCCESS) {
1852 1850 return (DDI_FAILURE);
1853 1851 }
1854 1852
1855 1853 /* Allocate memory for the mailbox */
1856 1854 temp_len = (num << pool_info->rsrc_shift);
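	/*
	 * Example (shift value is illustrative only): with rsrc_shift = 12
	 * (4KB mailboxes) and num = 2, temp_len requests 8KB; real_len
	 * below may come back larger if ddi_dma_mem_alloc() rounds the
	 * allocation up.
	 */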
1857 1855 status = ddi_dma_mem_alloc(hdl->hr_dmahdl, temp_len,
1858 1856 &priv->pmb_devaccattr, priv->pmb_xfer_mode, DDI_DMA_SLEEP,
1859 1857 NULL, &kaddr, &real_len, &hdl->hr_acchdl);
1860 1858 if (status != DDI_SUCCESS) {
1861 1859 /* No more memory available for mailbox entries */
1862 1860 ddi_dma_free_handle(&hdl->hr_dmahdl);
1863 1861 return (DDI_FAILURE);
1864 1862 }
1865 1863
1866 1864 hdl->hr_addr = (void *)kaddr;
1867 1865 hdl->hr_len = (uint32_t)real_len;
1868 1866
1869 1867 return (DDI_SUCCESS);
1870 1868 }
1871 1869
1872 1870
1873 1871 /*
1874 1872 * hermon_rsrc_mbox_free()
1875 1873 * Context: Can be called from interrupt or base context.
1876 1874 */
1877 1875 static void
1878 1876 hermon_rsrc_mbox_free(hermon_rsrc_t *hdl)
1879 1877 {
1880 1878 ASSERT(hdl != NULL);
1881 1879
1882 1880 /* Use ddi_dma_mem_free() to free up sys memory for mailbox */
1883 1881 ddi_dma_mem_free(&hdl->hr_acchdl);
1884 1882
1885 1883 /* Free the DMA handle for the mailbox */
1886 1884 ddi_dma_free_handle(&hdl->hr_dmahdl);
1887 1885 }
1888 1886
1889 1887
1890 1888 /*
1891 1889 * hermon_rsrc_hw_entry_alloc()
1892 1890 * Context: Can be called from interrupt or base context.
1893 1891 */
1894 1892 static int
1895 1893 hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1896 1894 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1897 1895 {
1898 1896 void *addr;
1899 1897 uint64_t offset;
1900 1898 uint32_t align;
1901 1899 int status;
1902 1900 int flag;
1903 1901
1904 1902 ASSERT(pool_info != NULL);
1905 1903 ASSERT(hdl != NULL);
1906 1904
1907 1905 /*
1908 1906 * Use vmem_xalloc() to get a properly aligned pointer (based on
1909 1907 * the number requested) to the HW entry(ies). This handles the
1910 1908 * cases (for special QPCs and for RDB entries) where we need more
1911 1909 * than one and need to ensure that they are properly aligned.
1912 1910 */
1913 1911 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1914 1912 hdl->hr_len = (num << pool_info->rsrc_shift);
1915 1913 align = (num_align << pool_info->rsrc_shift);
1916 1914
1917 1915 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1918 1916 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1919 1917
1920 1918 if (addr == NULL) {
1921 1919 /* No more HW entries available */
1922 1920 return (DDI_FAILURE);
1923 1921 }
1924 1922
1925 1923 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1926 1924
1927 1925 /* Calculate vaddr and HW table index */
1928 1926 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1929 1927 hdl->hr_addr = addr; /* only used for mbox and uarpg resources */
1930 1928 hdl->hr_indx = offset >> pool_info->rsrc_shift;
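	/*
	 * Worked example (numbers are illustrative): with rsrc_start =
	 * 0x10000000 (the placeholder set in hermon_rsrc_hw_entries_init()
	 * above), rsrc_shift = 5 (32-byte entries) and an arena address of
	 * 0x10000100, offset is 0x100 and hr_indx is 8, i.e. the ninth
	 * entry in the HW table.
	 */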
1931 1929
1932 1930 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
1933 1931 int num_to_hdl;
1934 1932 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
1935 1933
1936 1934 num_to_hdl = (rsrc_type == HERMON_QPC ||
1937 1935 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
1938 1936
1939 1937 /* confirm ICM is mapped, and allocate if necessary */
1940 1938 status = hermon_rsrc_hw_entry_icm_confirm(pool_info, num, hdl,
1941 1939 num_to_hdl);
1942 1940 if (status != DDI_SUCCESS) {
1943 1941 return (DDI_FAILURE);
1944 1942 }
1945 1943 hdl->hr_addr = NULL; /* not used for ICM resources */
1946 1944 }
1947 1945
1948 1946 return (DDI_SUCCESS);
1949 1947 }
1950 1948
1951 1949
1952 1950 /*
1953 1951 * hermon_rsrc_hw_entry_reserve()
1954 1952 * Context: Can be called from interrupt or base context.
1955 1953 */
1956 1954 int
1957 1955 hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1958 1956 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1959 1957 {
1960 1958 void *addr;
1961 1959 uint64_t offset;
1962 1960 uint32_t align;
1963 1961 int flag;
1964 1962
1965 1963 ASSERT(pool_info != NULL);
1966 1964 ASSERT(hdl != NULL);
1967 1965 ASSERT(pool_info->rsrc_loc == HERMON_IN_ICM);
1968 1966
1969 1967 /*
1970 1968 * Use vmem_xalloc() to get a properly aligned pointer (based on
1971 1969 * the number requested) to the HW entry(ies). This handles the
1972 1970 * cases (for special QPCs and for RDB entries) where we need more
1973 1971 * than one and need to ensure that they are properly aligned.
1974 1972 */
1975 1973 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1976 1974 hdl->hr_len = (num << pool_info->rsrc_shift);
1977 1975 align = (num_align << pool_info->rsrc_shift);
1978 1976
1979 1977 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1980 1978 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1981 1979
1982 1980 if (addr == NULL) {
1983 1981 /* No more HW entries available */
1984 1982 return (DDI_FAILURE);
1985 1983 }
1986 1984
1987 1985 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1988 1986
1989 1987 /* Calculate vaddr and HW table index */
1990 1988 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1991 1989 hdl->hr_addr = NULL;
1992 1990 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1993 1991
1994 1992 /* ICM will be allocated and mapped if and when it gets used */
1995 1993
1996 1994 return (DDI_SUCCESS);
1997 1995 }
1998 1996
1999 1997
2000 1998 /*
2001 1999 * hermon_rsrc_hw_entry_free()
2002 2000 * Context: Can be called from interrupt or base context.
2003 2001 */
2004 2002 static void
2005 2003 hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
2006 2004 hermon_rsrc_t *hdl)
2007 2005 {
2008 2006 void *addr;
2009 2007 uint64_t offset;
2010 2008 int status;
2011 2009
2012 2010 ASSERT(pool_info != NULL);
2013 2011 ASSERT(hdl != NULL);
2014 2012
2015 2013 /* Calculate the allocated address */
2016 2014 offset = hdl->hr_indx << pool_info->rsrc_shift;
2017 2015 addr = (void *)(uintptr_t)(offset + (uintptr_t)pool_info->rsrc_start);
2018 2016
2019 2017 /* Use vmem_xfree() to free up the HW table entry */
2020 2018 vmem_xfree(pool_info->rsrc_vmp, addr, hdl->hr_len);
2021 2019
2022 2020 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
2023 2021 int num_to_hdl;
2024 2022 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
2025 2023
2026 2024 num_to_hdl = (rsrc_type == HERMON_QPC ||
2027 2025 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
2028 2026
2029 2027 /* free ICM references, and free ICM if required */
2030 2028 status = hermon_rsrc_hw_entry_icm_free(pool_info, hdl,
2031 2029 num_to_hdl);
2032 2030 if (status != DDI_SUCCESS)
2033 2031 HERMON_WARNING(pool_info->rsrc_state,
2034 2032 "failure in hw_entry_free");
2035 2033 }
2036 2034 }
2037 2035
2038 2036 /*
2039 2037 * hermon_rsrc_hw_entry_icm_confirm()
2040 2038 * Context: Can be called from interrupt or base context.
2041 2039 */
2042 2040 static int
2043 2041 hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, uint_t num,
2044 2042 hermon_rsrc_t *hdl, int num_to_hdl)
2045 2043 {
2046 2044 hermon_state_t *state;
2047 2045 hermon_icm_table_t *icm_table;
2048 2046 uint8_t *bitmap;
2049 2047 hermon_dma_info_t *dma_info;
2050 2048 hermon_rsrc_type_t type;
2051 2049 uint32_t rindx, span_offset;
2052 2050 uint32_t span_avail;
2053 2051 int num_backed;
2054 2052 int status;
2055 2053 uint32_t index1, index2;
2056 2054
2057 2055 /*
2058 2056 * Utility routine responsible for ensuring that there is memory
2059 2057 * backing the ICM resources allocated via hermon_rsrc_hw_entry_alloc().
2060 2058 * Confirm existing ICM mapping(s) or allocate ICM memory for the
2061 2059 * given hardware resources being allocated, and increment the
2062 2060 * ICM DMA structure(s) reference count.
2063 2061 *
2064 2062 * We may be allocating more objects than can fit in a single span,
2065 2063 * or more than will fit in the remaining contiguous memory (from
2066 2064 	 * the offset indicated by hdl->hr_indx) in the span in question.
2067 2065 * In either of these cases, we'll be breaking up our allocation
2068 2066 * into multiple spans.
2069 2067 */
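	/*
	 * Worked example (numbers are illustrative): if icm_table->span is
	 * 0x1000 objects and hdl->hr_indx is 0xFFE, a request for num = 4
	 * objects backs 2 of them in the current span (span_avail = 2) and
	 * rolls the remaining 2 over into the next span on the following
	 * pass through the loop below.
	 */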
2070 2068 state = pool_info->rsrc_state;
2071 2069 type = pool_info->rsrc_type;
2072 2070 icm_table = &state->hs_icm[type];
2073 2071
2074 2072 rindx = hdl->hr_indx;
2075 2073 hermon_index(index1, index2, rindx, icm_table, span_offset);
2076 2074
2077 2075 if (hermon_rsrc_verbose) {
2078 2076 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_confirm: "
2079 2077 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x): ",
2080 2078 type, num, hdl->hr_len, index1, index2);
2081 2079 }
2082 2080
2083 2081 mutex_enter(&icm_table->icm_table_lock);
2084 2082 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2085 2083 while (num) {
2086 -#ifndef __lock_lint
2087 2084 while (icm_table->icm_busy) {
2088 2085 cv_wait(&icm_table->icm_table_cv,
2089 2086 &icm_table->icm_table_lock);
2090 2087 }
2091 -#endif
2092 2088 if (!HERMON_BMAP_BIT_ISSET(bitmap, index2)) {
2093 2089 /* Allocate ICM for this span */
2094 2090 icm_table->icm_busy = 1;
2095 2091 mutex_exit(&icm_table->icm_table_lock);
2096 2092 status = hermon_icm_alloc(state, type, index1, index2);
2097 2093 mutex_enter(&icm_table->icm_table_lock);
2098 2094 icm_table->icm_busy = 0;
2099 2095 cv_broadcast(&icm_table->icm_table_cv);
2100 2096 if (status != DDI_SUCCESS) {
2101 2097 goto fail_alloc;
2102 2098 }
2103 2099 if (hermon_rsrc_verbose) {
2104 2100 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2105 2101 "hw_entry_icm_confirm: ALLOCATED ICM: "
2106 2102 "type (0x%x) index (0x%x, 0x%x)",
2107 2103 type, index1, index2);
2108 2104 }
2109 2105 }
2110 2106
2111 2107 /*
2112 2108 * We need to increment the refcnt of this span by the
2113 2109 * number of objects in this resource allocation that are
2114 2110 * backed by this span. Given that the rsrc allocation is
2115 2111 * contiguous, this value will be the number of objects in
2116 2112 * the span from 'span_offset' onward, either up to a max
2117 2113 * of the total number of objects, or the end of the span.
2118 2114 * So, determine the number of objects that can be backed
2119 2115 * by this span ('span_avail'), then determine the number
2120 2116 * of backed resources.
2121 2117 */
2122 2118 span_avail = icm_table->span - span_offset;
2123 2119 if (num > span_avail) {
2124 2120 num_backed = span_avail;
2125 2121 } else {
2126 2122 num_backed = num;
2127 2123 }
2128 2124
2129 2125 /*
2130 2126 * Now that we know 'num_backed', increment the refcnt,
2131 2127 * decrement the total number, and set 'span_offset' to
2132 2128 * 0 in case we roll over into the next span.
2133 2129 */
2134 2130 dma_info[index2].icm_refcnt += num_backed;
2135 2131 rindx += num_backed;
2136 2132 num -= num_backed;
2137 2133
2138 2134 if (hermon_rsrc_verbose) {
2139 2135 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) index "
2140 2136 "(0x%x, 0x%x) num_backed (0x%x)",
2141 2137 type, index1, index2, num_backed);
2142 2138 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) refcnt now "
2143 2139 "(0x%x) num_remaining (0x%x)", type,
2144 2140 dma_info[index2].icm_refcnt, num);
2145 2141 }
2146 2142 if (num == 0)
2147 2143 break;
2148 2144
2149 2145 hermon_index(index1, index2, rindx, icm_table, span_offset);
2150 2146 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2151 2147 }
2152 2148 mutex_exit(&icm_table->icm_table_lock);
2153 2149
2154 2150 return (DDI_SUCCESS);
2155 2151
2156 2152 fail_alloc:
2157 2153 /* JBDB */
2158 2154 if (hermon_rsrc_verbose) {
2159 2155 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2160 2156 "hw_entry_icm_confirm: FAILED ICM ALLOC: "
2161 2157 "type (0x%x) num remaind (0x%x) index (0x%x, 0x%x)"
2162 2158 "refcnt (0x%x)", type, num, index1, index2,
2163 2159 icm_table->icm_dma[index1][index2].icm_refcnt);
2164 2160 }
2165 2161 IBTF_DPRINTF_L2("hermon", "WARNING: "
2166 2162 "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");
2167 2163
2168 2164 #if needs_work
2169 2165 /* free refcnt's and any spans we've allocated */
2170 2166 while (index-- != start) {
2171 2167 /*
2172 2168 * JBDB - This is a bit tricky. We need to
2173 2169 * free refcnt's on any spans that we've
2174 2170 * incremented them on, and completely free
2175 2171 * spans that we've allocated. How do we do
2176 2172 * this here? Does it need to be as involved
2177 2173 * as the core of icm_free() below, or can
2178 2174 * we leverage breadcrumbs somehow?
2179 2175 */
2180 2176 HERMON_WARNING(state, "unable to allocate ICM memory: "
2181 2177 "UNIMPLEMENTED HANDLING!!");
2182 2178 }
2183 2179 #else
2184 2180 cmn_err(CE_WARN,
2185 2181 "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");
2186 2182 #endif
2187 2183 mutex_exit(&icm_table->icm_table_lock);
2188 2184
2189 2185 HERMON_WARNING(state, "unable to allocate ICM memory");
2190 2186 return (DDI_FAILURE);
2191 2187 }
2192 2188
2193 2189 /*
2194 2190 * hermon_rsrc_hw_entry_icm_free()
2195 2191 * Context: Can be called from interrupt or base context.
2196 2192 */
2197 2193 static int
2198 2194 hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
2199 2195 hermon_rsrc_t *hdl, int num_to_hdl)
2200 2196 {
2201 2197 hermon_state_t *state;
2202 2198 hermon_icm_table_t *icm_table;
2203 2199 uint8_t *bitmap;
2204 2200 hermon_dma_info_t *dma_info;
2205 2201 hermon_rsrc_type_t type;
2206 2202 uint32_t span_offset;
2207 2203 uint32_t span_remain;
2208 2204 int num_freed;
2209 2205 int num;
2210 2206 uint32_t index1, index2, rindx;
2211 2207
2212 2208 /*
2213 2209 * Utility routine responsible for freeing references to ICM
2214 2210 * DMA spans, and freeing the ICM memory if necessary.
2215 2211 *
2216 2212 * We may have allocated objects in a single contiguous resource
2217 2213 * allocation that reside in a number of spans, at any given
2218 2214 * starting offset within a span. We therefore must determine
2219 2215 * where this allocation starts, and then determine if we need
2220 2216 * to free objects in more than one span.
2221 2217 */
2222 2218 state = pool_info->rsrc_state;
2223 2219 type = pool_info->rsrc_type;
2224 2220 icm_table = &state->hs_icm[type];
2225 2221
2226 2222 rindx = hdl->hr_indx;
2227 2223 hermon_index(index1, index2, rindx, icm_table, span_offset);
2228 2224 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2229 2225
2230 2226 /* determine the number of ICM objects in this allocation */
2231 2227 num = hdl->hr_len >> pool_info->rsrc_shift;
2232 2228
2233 2229 if (hermon_rsrc_verbose) {
2234 2230 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_free: "
2235 2231 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x)",
2236 2232 type, num, hdl->hr_len, index1, index2);
2237 2233 }
2238 2234 mutex_enter(&icm_table->icm_table_lock);
2239 2235 while (num) {
2240 2236 /*
2241 2237 * As with the ICM confirm code above, we need to
2242 2238 * decrement the ICM span(s) by the number of
2243 2239 * resources being freed. So, determine the number
2244 2240 * of objects that are backed in this span from
2245 2241 * 'span_offset' onward, and set 'num_freed' to
2246 2242 * the smaller of either that number ('span_remain'),
2247 2243 * or the total number of objects being freed.
2248 2244 */
2249 2245 span_remain = icm_table->span - span_offset;
2250 2246 if (num > span_remain) {
2251 2247 num_freed = span_remain;
2252 2248 } else {
2253 2249 num_freed = num;
2254 2250 }
2255 2251
2256 2252 /*
2257 2253 * Now that we know 'num_freed', decrement the refcnt,
2258 2254 * decrement the total number, and set 'span_offset' to
2259 2255 * 0 in case we roll over into the next span.
2260 2256 */
2261 2257 dma_info[index2].icm_refcnt -= num_freed;
2262 2258 num -= num_freed;
2263 2259 rindx += num_freed;
2264 2260
2265 2261 if (hermon_rsrc_verbose) {
2266 2262 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) index "
2267 2263 "(0x%x, 0x%x) num_freed (0x%x)", type,
2268 2264 index1, index2, num_freed);
2269 2265 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) refcnt now "
2270 2266 "(0x%x) num remaining (0x%x)", type,
2271 2267 icm_table->icm_dma[index1][index2].icm_refcnt, num);
2272 2268 }
2273 2269
2274 2270 #if HERMON_ICM_FREE_ENABLED
2275 2271 /* If we've freed the last object in this span, free it */
2276 2272 if ((index1 != 0 || index2 != 0) &&
2277 2273 (dma_info[index2].icm_refcnt == 0)) {
2278 2274 if (hermon_rsrc_verbose) {
2279 2275 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry"
2280 2276 "_icm_free: freeing ICM type (0x%x) index"
2281 2277 " (0x%x, 0x%x)", type, index1, index2);
2282 2278 }
2283 2279 hermon_icm_free(state, type, index1, index2);
2284 2280 }
2285 2281 #endif
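		/*
		 * Note that span (0, 0) is excluded from the check above
		 * and so is never freed here, even if its refcnt drops
		 * to zero.
		 */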
2286 2282 if (num == 0)
2287 2283 break;
2288 2284
2289 2285 hermon_index(index1, index2, rindx, icm_table, span_offset);
2290 2286 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2291 2287 }
2292 2288 mutex_exit(&icm_table->icm_table_lock);
2293 2289
2294 2290 return (DDI_SUCCESS);
2295 2291 }
2296 2292
2297 2293
2298 2294
2299 2295 /*
2300 2296 * hermon_rsrc_swhdl_alloc()
2301 2297 * Context: Can be called from interrupt or base context.
2302 2298 */
2303 2299 static int
2304 2300 hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2305 2301 hermon_rsrc_t *hdl)
2306 2302 {
2307 2303 void *addr;
2308 2304 int flag;
2309 2305
2310 2306 ASSERT(pool_info != NULL);
2311 2307 ASSERT(hdl != NULL);
2312 2308
2313 2309 /* Allocate the software handle structure */
2314 2310 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2315 2311 addr = kmem_cache_alloc(pool_info->rsrc_private, flag);
2316 2312 if (addr == NULL) {
2317 2313 return (DDI_FAILURE);
2318 2314 }
2319 2315 hdl->hr_len = pool_info->rsrc_quantum;
2320 2316 hdl->hr_addr = addr;
2321 2317
2322 2318 return (DDI_SUCCESS);
2323 2319 }
2324 2320
2325 2321
2326 2322 /*
2327 2323 * hermon_rsrc_swhdl_free()
2328 2324 * Context: Can be called from interrupt or base context.
2329 2325 */
2330 2326 static void
2331 2327 hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2332 2328 {
2333 2329 ASSERT(pool_info != NULL);
2334 2330 ASSERT(hdl != NULL);
2335 2331
2336 2332 /* Free the software handle structure */
2337 2333 kmem_cache_free(pool_info->rsrc_private, hdl->hr_addr);
2338 2334 }
2339 2335
2340 2336
2341 2337 /*
2342 2338 * hermon_rsrc_pdhdl_alloc()
2343 2339 * Context: Can be called from interrupt or base context.
2344 2340 */
2345 2341 static int
2346 2342 hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2347 2343 hermon_rsrc_t *hdl)
2348 2344 {
2349 2345 hermon_pdhdl_t addr;
2350 2346 void *tmpaddr;
2351 2347 int flag, status;
2352 2348
2353 2349 ASSERT(pool_info != NULL);
2354 2350 ASSERT(hdl != NULL);
2355 2351
2356 2352 /* Allocate the software handle */
2357 2353 status = hermon_rsrc_swhdl_alloc(pool_info, sleepflag, hdl);
2358 2354 if (status != DDI_SUCCESS) {
2359 2355 return (DDI_FAILURE);
2360 2356 }
2361 2357 addr = (hermon_pdhdl_t)hdl->hr_addr;
2362 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*addr))
2363 2358
2364 2359 /* Allocate a PD number for the handle */
2365 2360 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
2366 2361 tmpaddr = vmem_alloc(pool_info->rsrc_vmp, 1, flag);
2367 2362 if (tmpaddr == NULL) {
2368 2363 /* No more PD number entries available */
2369 2364 hermon_rsrc_swhdl_free(pool_info, hdl);
2370 2365 return (DDI_FAILURE);
2371 2366 }
2372 2367 addr->pd_pdnum = (uint32_t)(uintptr_t)tmpaddr;
2373 2368 addr->pd_rsrcp = hdl;
2374 2369 hdl->hr_indx = addr->pd_pdnum;
2375 2370
2376 2371 return (DDI_SUCCESS);
2377 2372 }
2378 2373
2379 2374
2380 2375 /*
2381 2376 * hermon_rsrc_pdhdl_free()
2382 2377 * Context: Can be called from interrupt or base context.
2383 2378 */
2384 2379 static void
2385 2380 hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2386 2381 {
2387 2382 ASSERT(pool_info != NULL);
2388 2383 ASSERT(hdl != NULL);
2389 2384
2390 2385 /* Use vmem_free() to free up the PD number */
2391 2386 vmem_free(pool_info->rsrc_vmp, (void *)(uintptr_t)hdl->hr_indx, 1);
2392 2387
2393 2388 /* Free the software handle structure */
2394 2389 hermon_rsrc_swhdl_free(pool_info, hdl);
2395 2390 }
2396 2391
2397 2392
2398 2393 /*
2399 2394 * hermon_rsrc_pdhdl_constructor()
2400 2395 * Context: Can be called from interrupt or base context.
2401 2396 */
2402 2397 /* ARGSUSED */
2403 2398 static int
2404 2399 hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags)
2405 2400 {
2406 2401 hermon_pdhdl_t pdhdl;
2407 2402 hermon_state_t *state;
2408 2403
2409 2404 pdhdl = (hermon_pdhdl_t)pd;
2410 2405 state = (hermon_state_t *)priv;
2411 2406
2412 2407 mutex_init(&pdhdl->pd_lock, NULL, MUTEX_DRIVER,
2413 2408 DDI_INTR_PRI(state->hs_intrmsi_pri));
2414 2409
2415 2410 return (DDI_SUCCESS);
2416 2411 }
2417 2412
2418 2413
2419 2414 /*
2420 2415 * hermon_rsrc_pdhdl_destructor()
2421 2416 * Context: Can be called from interrupt or base context.
2422 2417 */
2423 2418 /* ARGSUSED */
2424 2419 static void
2425 2420 hermon_rsrc_pdhdl_destructor(void *pd, void *priv)
2426 2421 {
2427 2422 hermon_pdhdl_t pdhdl;
2428 2423
2429 2424 pdhdl = (hermon_pdhdl_t)pd;
2430 2425
2431 2426 mutex_destroy(&pdhdl->pd_lock);
2432 2427 }
2433 2428
2434 2429
2435 2430 /*
2436 2431 * hermon_rsrc_cqhdl_constructor()
2437 2432 * Context: Can be called from interrupt or base context.
2438 2433 */
2439 2434 /* ARGSUSED */
2440 2435 static int
2441 2436 hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags)
2442 2437 {
2443 2438 hermon_cqhdl_t cqhdl;
2444 2439 hermon_state_t *state;
2445 2440
2446 2441 cqhdl = (hermon_cqhdl_t)cq;
2447 2442 state = (hermon_state_t *)priv;
2448 2443
2449 2444 mutex_init(&cqhdl->cq_lock, NULL, MUTEX_DRIVER,
2450 2445 DDI_INTR_PRI(state->hs_intrmsi_pri));
2451 2446
2452 2447 return (DDI_SUCCESS);
2453 2448 }
2454 2449
2455 2450
2456 2451 /*
2457 2452 * hermon_rsrc_cqhdl_destructor()
2458 2453 * Context: Can be called from interrupt or base context.
2459 2454 */
2460 2455 /* ARGSUSED */
2461 2456 static void
2462 2457 hermon_rsrc_cqhdl_destructor(void *cq, void *priv)
2463 2458 {
2464 2459 hermon_cqhdl_t cqhdl;
2465 2460
2466 2461 cqhdl = (hermon_cqhdl_t)cq;
2467 2462
2468 2463 mutex_destroy(&cqhdl->cq_lock);
2469 2464 }
2470 2465
2471 2466
2472 2467 /*
2473 2468 * hermon_rsrc_qphdl_constructor()
2474 2469 * Context: Can be called from interrupt or base context.
2475 2470 */
2476 2471 /* ARGSUSED */
2477 2472 static int
2478 2473 hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags)
2479 2474 {
2480 2475 hermon_qphdl_t qphdl;
2481 2476 hermon_state_t *state;
2482 2477
2483 2478 qphdl = (hermon_qphdl_t)qp;
2484 2479 state = (hermon_state_t *)priv;
2485 2480
2486 2481 mutex_init(&qphdl->qp_lock, NULL, MUTEX_DRIVER,
2487 2482 DDI_INTR_PRI(state->hs_intrmsi_pri));
2488 2483
2489 2484 return (DDI_SUCCESS);
2490 2485 }
2491 2486
2492 2487
2493 2488 /*
2494 2489 * hermon_rsrc_qphdl_destructor()
2495 2490 * Context: Can be called from interrupt or base context.
2496 2491 */
2497 2492 /* ARGSUSED */
2498 2493 static void
2499 2494 hermon_rsrc_qphdl_destructor(void *qp, void *priv)
2500 2495 {
2501 2496 hermon_qphdl_t qphdl;
2502 2497
2503 2498 qphdl = (hermon_qphdl_t)qp;
2504 2499
2505 2500 mutex_destroy(&qphdl->qp_lock);
2506 2501 }
2507 2502
2508 2503
2509 2504 /*
2510 2505 * hermon_rsrc_srqhdl_constructor()
2511 2506 * Context: Can be called from interrupt or base context.
2512 2507 */
2513 2508 /* ARGSUSED */
2514 2509 static int
2515 2510 hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags)
2516 2511 {
2517 2512 hermon_srqhdl_t srqhdl;
2518 2513 hermon_state_t *state;
2519 2514
2520 2515 srqhdl = (hermon_srqhdl_t)srq;
2521 2516 state = (hermon_state_t *)priv;
2522 2517
2523 2518 mutex_init(&srqhdl->srq_lock, NULL, MUTEX_DRIVER,
2524 2519 DDI_INTR_PRI(state->hs_intrmsi_pri));
2525 2520
2526 2521 return (DDI_SUCCESS);
2527 2522 }
2528 2523
2529 2524
2530 2525 /*
2531 2526 * hermon_rsrc_srqhdl_destructor()
2532 2527 * Context: Can be called from interrupt or base context.
2533 2528 */
2534 2529 /* ARGSUSED */
2535 2530 static void
2536 2531 hermon_rsrc_srqhdl_destructor(void *srq, void *priv)
2537 2532 {
2538 2533 hermon_srqhdl_t srqhdl;
2539 2534
2540 2535 srqhdl = (hermon_srqhdl_t)srq;
2541 2536
2542 2537 mutex_destroy(&srqhdl->srq_lock);
2543 2538 }
2544 2539
2545 2540
2546 2541 /*
2547 2542 * hermon_rsrc_refcnt_constructor()
2548 2543 * Context: Can be called from interrupt or base context.
2549 2544 */
2550 2545 /* ARGSUSED */
2551 2546 static int
2552 2547 hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags)
2553 2548 {
2554 2549 hermon_sw_refcnt_t *refcnt;
2555 2550 hermon_state_t *state;
2556 2551
2557 2552 refcnt = (hermon_sw_refcnt_t *)rc;
2558 2553 state = (hermon_state_t *)priv;
2559 2554
2560 2555 mutex_init(&refcnt->swrc_lock, NULL, MUTEX_DRIVER,
2561 2556 DDI_INTR_PRI(state->hs_intrmsi_pri));
2562 2557
2563 2558 return (DDI_SUCCESS);
2564 2559 }
2565 2560
2566 2561
2567 2562 /*
2568 2563 * hermon_rsrc_refcnt_destructor()
2569 2564 * Context: Can be called from interrupt or base context.
2570 2565 */
2571 2566 /* ARGSUSED */
2572 2567 static void
2573 2568 hermon_rsrc_refcnt_destructor(void *rc, void *priv)
2574 2569 {
2575 2570 hermon_sw_refcnt_t *refcnt;
2576 2571
2577 2572 refcnt = (hermon_sw_refcnt_t *)rc;
2578 2573
2579 2574 mutex_destroy(&refcnt->swrc_lock);
2580 2575 }
2581 2576
2582 2577
2583 2578 /*
2584 2579 * hermon_rsrc_ahhdl_constructor()
2585 2580 * Context: Can be called from interrupt or base context.
2586 2581 */
2587 2582 /* ARGSUSED */
2588 2583 static int
2589 2584 hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags)
2590 2585 {
2591 2586 hermon_ahhdl_t ahhdl;
2592 2587 hermon_state_t *state;
2593 2588
2594 2589 ahhdl = (hermon_ahhdl_t)ah;
2595 2590 state = (hermon_state_t *)priv;
2596 2591
2597 2592 mutex_init(&ahhdl->ah_lock, NULL, MUTEX_DRIVER,
2598 2593 DDI_INTR_PRI(state->hs_intrmsi_pri));
2599 2594 return (DDI_SUCCESS);
2600 2595 }
2601 2596
2602 2597
2603 2598 /*
2604 2599 * hermon_rsrc_ahhdl_destructor()
2605 2600 * Context: Can be called from interrupt or base context.
2606 2601 */
2607 2602 /* ARGSUSED */
2608 2603 static void
2609 2604 hermon_rsrc_ahhdl_destructor(void *ah, void *priv)
2610 2605 {
2611 2606 hermon_ahhdl_t ahhdl;
2612 2607
2613 2608 ahhdl = (hermon_ahhdl_t)ah;
2614 2609
2615 2610 mutex_destroy(&ahhdl->ah_lock);
2616 2611 }
2617 2612
2618 2613
2619 2614 /*
2620 2615 * hermon_rsrc_mrhdl_constructor()
2621 2616 * Context: Can be called from interrupt or base context.
2622 2617 */
2623 2618 /* ARGSUSED */
2624 2619 static int
2625 2620 hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags)
2626 2621 {
2627 2622 hermon_mrhdl_t mrhdl;
2628 2623 hermon_state_t *state;
2629 2624
2630 2625 mrhdl = (hermon_mrhdl_t)mr;
2631 2626 state = (hermon_state_t *)priv;
2632 2627
2633 2628 mutex_init(&mrhdl->mr_lock, NULL, MUTEX_DRIVER,
2634 2629 DDI_INTR_PRI(state->hs_intrmsi_pri));
2635 2630
2636 2631 return (DDI_SUCCESS);
2637 2632 }
2638 2633
2639 2634
2640 2635 /*
2641 2636 * hermon_rsrc_mrhdl_destructor()
2642 2637 * Context: Can be called from interrupt or base context.
2643 2638 */
2644 2639 /* ARGSUSED */
2645 2640 static void
2646 2641 hermon_rsrc_mrhdl_destructor(void *mr, void *priv)
2647 2642 {
2648 2643 hermon_mrhdl_t mrhdl;
2649 2644
2650 2645 mrhdl = (hermon_mrhdl_t)mr;
2651 2646
2652 2647 mutex_destroy(&mrhdl->mr_lock);
2653 2648 }
2654 2649
2655 2650
2656 2651 /*
2657 2652 * hermon_rsrc_mcg_entry_get_size()
2658 2653 */
2659 2654 static int
2660 2655 hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, uint_t *mcg_size_shift)
2661 2656 {
2662 2657 uint_t num_qp_per_mcg, max_qp_per_mcg, log2;
2663 2658
2664 2659 /*
2665 2660 * Round the configured number of QP per MCG to next larger
2666 2661 * power-of-2 size and update.
2667 2662 */
2668 2663 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg + 8;
2669 2664 log2 = highbit(num_qp_per_mcg);
2670 2665 if (ISP2(num_qp_per_mcg)) {
2671 2666 log2 = log2 - 1;
2672 2667 }
2673 2668 state->hs_cfg_profile->cp_num_qp_per_mcg = (1 << log2) - 8;
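	/*
	 * Worked example (profile value is illustrative): with
	 * cp_num_qp_per_mcg = 56, num_qp_per_mcg is 64 above, highbit()
	 * returns 7 and ISP2() holds, so log2 = 6 and the profile is
	 * rewritten as (1 << 6) - 8 = 56; the MCG entry size implied by the
	 * shift returned below is (1 << (6 + 2)) = 256 bytes.
	 */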
2674 2669
2675 2670 /* Now make sure number of QP per MCG makes sense */
2676 2671 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg;
2677 2672 max_qp_per_mcg = (1 << state->hs_devlim.log_max_qp_mcg);
2678 2673 if (num_qp_per_mcg > max_qp_per_mcg) {
2679 2674 return (DDI_FAILURE);
2680 2675 }
2681 2676
2682 2677 /* Return the (shift) size of an individual MCG HW entry */
2683 2678 *mcg_size_shift = log2 + 2;
2684 2679
2685 2680 return (DDI_SUCCESS);
2686 2681 }