Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_misc.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_misc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_misc.c
28 28 * Hermon Miscellaneous routines - Address Handle, Multicast, Protection
29 29 * Domain, and port-related operations
30 30 *
31 31 * Implements all the routines necessary for allocating, freeing, querying
32 32 * and modifying Address Handles and Protection Domains. Also implements
33 33 * all the routines necessary for adding and removing Queue Pairs to/from
34 34 * Multicast Groups. Lastly, it implements the routines necessary for
35 35 * port-related query and modify operations.
36 36 */
37 37
38 38 #include <sys/types.h>
39 39 #include <sys/conf.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/sunddi.h>
42 42 #include <sys/modctl.h>
43 43 #include <sys/bitmap.h>
44 44 #include <sys/sysmacros.h>
45 45
46 46 #include <sys/ib/adapters/hermon/hermon.h>
47 47
48 48 extern int hermon_rdma_debug;
49 49 int hermon_fmr_verbose = 0;
50 50
51 51 static int hermon_mcg_qplist_add(hermon_state_t *state, hermon_mcghdl_t mcg,
52 52 hermon_hw_mcg_qp_list_t *mcg_qplist, hermon_qphdl_t qp, uint_t *qp_found);
53 53 static int hermon_mcg_qplist_remove(hermon_mcghdl_t mcg,
54 54 hermon_hw_mcg_qp_list_t *mcg_qplist, hermon_qphdl_t qp);
55 55 static void hermon_qp_mcg_refcnt_inc(hermon_qphdl_t qp);
56 56 static void hermon_qp_mcg_refcnt_dec(hermon_qphdl_t qp);
57 57 static uint_t hermon_mcg_walk_mgid_hash(hermon_state_t *state,
58 58 uint64_t start_indx, ib_gid_t mgid, uint_t *prev_indx);
59 59 static void hermon_mcg_setup_new_hdr(hermon_mcghdl_t mcg,
60 60 hermon_hw_mcg_t *mcg_hdr, ib_gid_t mgid, hermon_rsrc_t *mcg_rsrc);
61 61 static int hermon_mcg_hash_list_remove(hermon_state_t *state, uint_t curr_indx,
62 62 uint_t prev_indx, hermon_hw_mcg_t *mcg_entry);
63 63 static int hermon_mcg_entry_invalidate(hermon_state_t *state,
64 64 hermon_hw_mcg_t *mcg_entry, uint_t indx);
65 65 static int hermon_mgid_is_valid(ib_gid_t gid);
66 66 static int hermon_mlid_is_valid(ib_lid_t lid);
67 67 static void hermon_fmr_cleanup(hermon_fmrhdl_t pool);
68 68
69 69
70 70 #define HERMON_MAX_DBR_PAGES_PER_USER 64
71 71 #define HERMON_DBR_KEY(index, page) \
72 72 (((uint64_t)index) * HERMON_MAX_DBR_PAGES_PER_USER + (page))
73 73
74 74 static hermon_udbr_page_t *
75 75 hermon_dbr_new_user_page(hermon_state_t *state, uint_t index,
76 76 uint_t page)
77 77 {
78 78 hermon_udbr_page_t *pagep;
79 79 ddi_dma_attr_t dma_attr;
80 80 uint_t cookiecnt;
81 81 int status;
82 82 hermon_umap_db_entry_t *umapdb;
83 83 ulong_t pagesize = PAGESIZE;
84 84
85 85 pagep = kmem_alloc(sizeof (*pagep), KM_SLEEP);
86 86 pagep->upg_index = page;
87 87 pagep->upg_nfree = pagesize / sizeof (hermon_dbr_t);
88 88
89 89 /* Allocate 1 bit per dbr for free/alloc management (0 => "free") */
90 90 pagep->upg_free = kmem_zalloc(pagesize / sizeof (hermon_dbr_t) / 8,
91 91 KM_SLEEP);
92 92 pagep->upg_kvaddr = ddi_umem_alloc(pagesize, DDI_UMEM_SLEEP,
93 93 &pagep->upg_umemcookie); /* not HERMON_PAGESIZE here */
94 94
95 95 pagep->upg_buf = ddi_umem_iosetup(pagep->upg_umemcookie, 0,
96 96 pagesize, B_WRITE, 0, 0, NULL, DDI_UMEM_SLEEP);
97 97
98 98 hermon_dma_attr_init(state, &dma_attr);
99 99 #ifdef __sparc
100 100 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
101 101 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
102 102 #endif
103 103 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
104 104 DDI_DMA_SLEEP, NULL, &pagep->upg_dmahdl);
105 105 if (status != DDI_SUCCESS) {
106 106 IBTF_DPRINTF_L2("hermon", "hermon_new_user_page: "
107 107 "ddi_dma_buf_bind_handle failed: %d", status);
108 108 return (NULL);
109 109 }
110 110 status = ddi_dma_buf_bind_handle(pagep->upg_dmahdl,
111 111 pagep->upg_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
112 112 DDI_DMA_SLEEP, NULL, &pagep->upg_dmacookie, &cookiecnt);
113 113 if (status != DDI_SUCCESS) {
114 114 IBTF_DPRINTF_L2("hermon", "hermon_dbr_new_user_page: "
115 115 "ddi_dma_buf_bind_handle failed: %d", status);
116 116 ddi_dma_free_handle(&pagep->upg_dmahdl);
117 117 return (NULL);
118 118 }
119 119 ASSERT(cookiecnt == 1);
120 120
121 121 /* create db entry for mmap */
122 122 umapdb = hermon_umap_db_alloc(state->hs_instance,
123 123 HERMON_DBR_KEY(index, page), MLNX_UMAP_DBRMEM_RSRC,
124 124 (uint64_t)(uintptr_t)pagep);
125 125 hermon_umap_db_add(umapdb);
126 126 return (pagep);
127 127 }
128 128
129 129
/*
 * hermon_user_dbr_alloc()
 *    Hand out one doorbell record (dbr) for a userland (non-kernel-UAR)
 *    consumer identified by UAR "index".  Finds (or creates) the per-index
 *    tracking struct, picks a page with a free slot (allocating a new page
 *    if none has one), and marks that slot used in the page's bitmap.
 *    Returns through the out-parameters the kernel virtual address, the
 *    DMA address, and the mmap offset the user needs to map the page.
 *    Takes hs_dbr_lock internally.
 */
/*ARGSUSED*/
static int
hermon_user_dbr_alloc(hermon_state_t *state, uint_t index,
    ddi_acc_handle_t *acchdl, hermon_dbr_t **vdbr, uint64_t *pdbr,
    uint64_t *mapoffset)
{
	hermon_user_dbr_t *udbr;
	hermon_udbr_page_t *pagep;
	uint_t next_page;
	int dbr_index;
	int i1, i2, i3, last;
	uint64_t u64, mask;

	mutex_enter(&state->hs_dbr_lock);
	/* Look up the tracking struct for this UAR index */
	for (udbr = state->hs_user_dbr; udbr != NULL; udbr = udbr->udbr_link)
		if (udbr->udbr_index == index)
			break;
	if (udbr == NULL) {
		/* First allocation for this index: create the struct */
		udbr = kmem_alloc(sizeof (*udbr), KM_SLEEP);
		udbr->udbr_link = state->hs_user_dbr;
		state->hs_user_dbr = udbr;
		udbr->udbr_index = index;
		udbr->udbr_pagep = NULL;
	}
	pagep = udbr->udbr_pagep;
	/* a newly-allocated page gets an index one past the newest page */
	next_page = (pagep == NULL) ? 0 : (pagep->upg_index + 1);
	/* Find the first page with at least one free dbr */
	while (pagep != NULL)
		if (pagep->upg_nfree > 0)
			break;
		else
			pagep = pagep->upg_link;
	if (pagep == NULL) {
		/* No page has a free dbr: allocate one and link it in */
		pagep = hermon_dbr_new_user_page(state, index, next_page);
		if (pagep == NULL) {
			mutex_exit(&state->hs_dbr_lock);
			return (DDI_FAILURE);
		}
		pagep->upg_link = udbr->udbr_pagep;
		udbr->udbr_pagep = pagep;
	}

	/* Since nfree > 0, we're assured the loops below will succeed */

	/* First, find a 64-bit (not ~0) that has a free dbr */
	last = PAGESIZE / sizeof (uint64_t) / 64;
	mask = ~0ull;
	for (i1 = 0; i1 < last; i1++)
		if ((pagep->upg_free[i1] & mask) != mask)
			break;
	u64 = pagep->upg_free[i1];

	/* Second, find a byte (not 0xff) that has a free dbr */
	last = sizeof (uint64_t) / sizeof (uint8_t);
	for (i2 = 0, mask = 0xff; i2 < last; i2++, mask <<= 8)
		if ((u64 & mask) != mask)
			break;

	/* Third, find a bit that is free (0) */
	for (i3 = 0; i3 < sizeof (uint64_t) / sizeof (uint8_t); i3++)
		if ((u64 & (1ul << (i3 + 8 * i2))) == 0)
			break;

	/* Mark it as allocated */
	pagep->upg_free[i1] |= (1ul << (i3 + 8 * i2));

	/* word index * 64 + byte index * 8 + bit index => dbr slot */
	dbr_index = ((i1 * sizeof (uint64_t)) + i2) * sizeof (uint64_t) + i3;
	pagep->upg_nfree--;
	((uint64_t *)(void *)pagep->upg_kvaddr)[dbr_index] = 0; /* clear dbr */
	/* encode the umap-db key and resource type into the mmap offset */
	*mapoffset = ((HERMON_DBR_KEY(index, pagep->upg_index) <<
	    MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_DBRMEM_RSRC) << PAGESHIFT;
	*vdbr = (hermon_dbr_t *)((uint64_t *)(void *)pagep->upg_kvaddr +
	    dbr_index);
	*pdbr = pagep->upg_dmacookie.dmac_laddress + dbr_index *
	    sizeof (uint64_t);

	mutex_exit(&state->hs_dbr_lock);
	return (DDI_SUCCESS);
}
208 208
209 209 static void
210 210 hermon_user_dbr_free(hermon_state_t *state, uint_t index, hermon_dbr_t *record)
211 211 {
212 212 hermon_user_dbr_t *udbr;
213 213 hermon_udbr_page_t *pagep;
214 214 caddr_t kvaddr;
215 215 uint_t dbr_index;
216 216 uint_t max_free = PAGESIZE / sizeof (hermon_dbr_t);
217 217 int i1, i2;
218 218
219 219 dbr_index = (uintptr_t)record & PAGEOFFSET; /* offset (not yet index) */
220 220 kvaddr = (caddr_t)record - dbr_index;
221 221 dbr_index /= sizeof (hermon_dbr_t); /* now it's the index */
222 222
223 223 mutex_enter(&state->hs_dbr_lock);
224 224 for (udbr = state->hs_user_dbr; udbr != NULL; udbr = udbr->udbr_link)
225 225 if (udbr->udbr_index == index)
226 226 break;
227 227 if (udbr == NULL) {
228 228 IBTF_DPRINTF_L2("hermon", "free user dbr: udbr struct not "
229 229 "found for index %x", index);
230 230 mutex_exit(&state->hs_dbr_lock);
231 231 return;
232 232 }
233 233 for (pagep = udbr->udbr_pagep; pagep != NULL; pagep = pagep->upg_link)
234 234 if (pagep->upg_kvaddr == kvaddr)
235 235 break;
236 236 if (pagep == NULL) {
237 237 IBTF_DPRINTF_L2("hermon", "free user dbr: pagep struct not"
238 238 " found for index %x, kvaddr %p, DBR index %x",
239 239 index, kvaddr, dbr_index);
240 240 mutex_exit(&state->hs_dbr_lock);
241 241 return;
242 242 }
243 243 if (pagep->upg_nfree >= max_free) {
244 244 IBTF_DPRINTF_L2("hermon", "free user dbr: overflow: "
245 245 "UCE index %x, DBR index %x", index, dbr_index);
246 246 mutex_exit(&state->hs_dbr_lock);
247 247 return;
248 248 }
249 249 ASSERT(dbr_index < max_free);
250 250 i1 = dbr_index / 64;
251 251 i2 = dbr_index % 64;
252 252 ASSERT((pagep->upg_free[i1] & (1ul << i2)) == (1ul << i2));
253 253 pagep->upg_free[i1] &= ~(1ul << i2);
254 254 pagep->upg_nfree++;
255 255 mutex_exit(&state->hs_dbr_lock);
256 256 }
257 257
/*
 * hermon_dbr_page_alloc()
 *    first page allocation - called from attach or open
 *    in this case, we want exactly one page per call, and aligned on a
 *    page - and may need to be mapped to the user for access
 *
 *    Allocates one page of DMA-consistent memory for kernel doorbell
 *    records, binds it to a single DMA cookie, initializes the embedded
 *    free list, and returns the tracking struct through "dinfo".
 *    Returns DDI_SUCCESS or DDI_FAILURE; on failure all resources
 *    acquired so far are released.
 */
int
hermon_dbr_page_alloc(hermon_state_t *state, hermon_dbr_info_t **dinfo)
{
	int status;
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_attr_t dma_attr;
	ddi_dma_cookie_t cookie;
	uint_t cookie_cnt;
	int i;
	hermon_dbr_info_t *info;
	caddr_t dmaaddr;
	uint64_t dmalen;
	ulong_t pagesize = PAGESIZE;

	info = kmem_zalloc(sizeof (hermon_dbr_info_t), KM_SLEEP);

	/*
	 * Initialize many of the default DMA attributes. Then set additional
	 * alignment restrictions if necessary for the dbr memory, meaning
	 * page aligned. Also use the configured value for IOMMU bypass
	 */
	hermon_dma_attr_init(state, &dma_attr);
	dma_attr.dma_attr_align = pagesize;
	dma_attr.dma_attr_sgllen = 1; /* make sure only one cookie */
#ifdef __sparc
	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
#endif

	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_hdl);
	if (status != DDI_SUCCESS) {
		kmem_free((void *)info, sizeof (hermon_dbr_info_t));
		cmn_err(CE_NOTE, "dbr DMA handle alloc failed\n");
		return (DDI_FAILURE);
	}

	status = ddi_dma_mem_alloc(dma_hdl, pagesize,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &dmaaddr, (size_t *)&dmalen, &acc_hdl);
	if (status != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_hdl);
		cmn_err(CE_CONT, "dbr DMA mem alloc failed(status %d)", status);
		kmem_free((void *)info, sizeof (hermon_dbr_info_t));
		return (DDI_FAILURE);
	}

	/* this memory won't be IB registered, so do the bind here */
	status = ddi_dma_addr_bind_handle(dma_hdl, NULL,
	    dmaaddr, (size_t)dmalen, DDI_DMA_RDWR |
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt);
	if (status != DDI_SUCCESS) {
		ddi_dma_mem_free(&acc_hdl);
		ddi_dma_free_handle(&dma_hdl);
		kmem_free((void *)info, sizeof (hermon_dbr_info_t));
		cmn_err(CE_CONT, "dbr DMA bind handle failed (status %d)",
		    status);
		return (DDI_FAILURE);
	}
	*dinfo = info; /* Pass back the pointer */

	/* init the info structure with returned info */
	info->dbr_dmahdl = dma_hdl;
	info->dbr_acchdl = acc_hdl;
	info->dbr_page = (hermon_dbr_t *)(void *)dmaaddr;
	info->dbr_link = NULL;
	/* extract the phys addr from the cookie */
	info->dbr_paddr = cookie.dmac_laddress;
	info->dbr_firstfree = 0;
	info->dbr_nfree = HERMON_NUM_DBR_PER_PAGE;
	/* link all DBrs onto the free list */
	/*
	 * Each record holds the index of the next free record; the last
	 * entry's "next" is one past the end, but dbr_nfree reaching zero
	 * prevents the allocator from ever following it (see
	 * hermon_dbr_alloc()).
	 */
	for (i = 0; i < HERMON_NUM_DBR_PER_PAGE; i++) {
		info->dbr_page[i] = i + 1;
	}

	return (DDI_SUCCESS);
}
342 342
343 343
344 344 /*
345 345 * hermon_dbr_alloc()
346 346 * DBr record allocation - called from alloc cq/qp/srq
347 347 * will check for available dbrs in current
348 348 * page - if needed it will allocate another and link them
349 349 */
350 350
351 351 int
352 352 hermon_dbr_alloc(hermon_state_t *state, uint_t index, ddi_acc_handle_t *acchdl,
353 353 hermon_dbr_t **vdbr, uint64_t *pdbr, uint64_t *mapoffset)
354 354 {
355 355 hermon_dbr_t *record = NULL;
356 356 hermon_dbr_info_t *info = NULL;
357 357 uint32_t idx;
358 358 int status;
359 359
360 360 if (index != state->hs_kernel_uar_index)
361 361 return (hermon_user_dbr_alloc(state, index, acchdl, vdbr, pdbr,
362 362 mapoffset));
363 363
364 364 mutex_enter(&state->hs_dbr_lock);
365 365 for (info = state->hs_kern_dbr; info != NULL; info = info->dbr_link)
366 366 if (info->dbr_nfree != 0)
367 367 break; /* found a page w/ one available */
368 368
369 369 if (info == NULL) { /* did NOT find a page with one available */
370 370 status = hermon_dbr_page_alloc(state, &info);
371 371 if (status != DDI_SUCCESS) {
372 372 /* do error handling */
373 373 mutex_exit(&state->hs_dbr_lock);
374 374 return (DDI_FAILURE);
375 375 }
376 376 /* got a new page, so link it in. */
377 377 info->dbr_link = state->hs_kern_dbr;
378 378 state->hs_kern_dbr = info;
379 379 }
380 380 idx = info->dbr_firstfree;
381 381 record = info->dbr_page + idx;
382 382 info->dbr_firstfree = *record;
383 383 info->dbr_nfree--;
384 384 *record = 0;
385 385
386 386 *acchdl = info->dbr_acchdl;
387 387 *vdbr = record;
388 388 *pdbr = info->dbr_paddr + idx * sizeof (hermon_dbr_t);
389 389 mutex_exit(&state->hs_dbr_lock);
390 390 return (DDI_SUCCESS);
391 391 }
392 392
393 393 /*
394 394 * hermon_dbr_free()
395 395 * DBr record deallocation - called from free cq/qp
396 396 * will update the counter in the header, and invalidate
397 397 * the dbr, but will NEVER free pages of dbrs - small
398 398 * price to pay, but userland access never will anyway
399 399 */
400 400 void
401 401 hermon_dbr_free(hermon_state_t *state, uint_t indx, hermon_dbr_t *record)
402 402 {
403 403 hermon_dbr_t *page;
404 404 hermon_dbr_info_t *info;
405 405
406 406 if (indx != state->hs_kernel_uar_index) {
407 407 hermon_user_dbr_free(state, indx, record);
408 408 return;
409 409 }
410 410 page = (hermon_dbr_t *)(uintptr_t)((uintptr_t)record & PAGEMASK);
411 411 mutex_enter(&state->hs_dbr_lock);
412 412 for (info = state->hs_kern_dbr; info != NULL; info = info->dbr_link)
413 413 if (info->dbr_page == page)
414 414 break;
415 415 ASSERT(info != NULL);
416 416 *record = info->dbr_firstfree;
417 417 info->dbr_firstfree = record - info->dbr_page;
418 418 info->dbr_nfree++;
419 419 mutex_exit(&state->hs_dbr_lock);
420 420 }
421 421
/*
 * hermon_dbr_kern_free()
 *    Context: Can be called only from detach context.
 *
 * Free all kernel dbr pages. This includes the freeing of all the dma
 * resources acquired during the allocation of the pages.
 *
 * Also, free all the user dbr pages.
 */
void
hermon_dbr_kern_free(hermon_state_t *state)
{
	hermon_dbr_info_t *info, *link;
	hermon_user_dbr_t *udbr, *next;
	hermon_udbr_page_t *pagep, *nextp;
	hermon_umap_db_entry_t *umapdb;
	int instance, status;
	uint64_t value;
	extern hermon_umap_db_t hermon_userland_rsrc_db;

	mutex_enter(&state->hs_dbr_lock);
	/* Unbind and free every kernel dbr page and its tracking struct */
	for (info = state->hs_kern_dbr; info != NULL; info = link) {
		(void) ddi_dma_unbind_handle(info->dbr_dmahdl);
		ddi_dma_mem_free(&info->dbr_acchdl); /* free page */
		ddi_dma_free_handle(&info->dbr_dmahdl);
		/* capture the successor before freeing the struct itself */
		link = info->dbr_link;
		kmem_free(info, sizeof (hermon_dbr_info_t));
	}

	udbr = state->hs_user_dbr;
	instance = state->hs_instance;
	/* hold the umap db lock across removal of all dbr page entries */
	mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
	while (udbr != NULL) {
		pagep = udbr->udbr_pagep;
		/* tear down each page belonging to this UAR index */
		while (pagep != NULL) {
			/* probably need to remove "db" */
			(void) ddi_dma_unbind_handle(pagep->upg_dmahdl);
			ddi_dma_free_handle(&pagep->upg_dmahdl);
			freerbuf(pagep->upg_buf);
			ddi_umem_free(pagep->upg_umemcookie);
			/* remove (and free) the page's umap db entry, if any */
			status = hermon_umap_db_find_nolock(instance,
			    HERMON_DBR_KEY(udbr->udbr_index,
			    pagep->upg_index), MLNX_UMAP_DBRMEM_RSRC,
			    &value, HERMON_UMAP_DB_REMOVE, &umapdb);
			if (status == DDI_SUCCESS)
				hermon_umap_db_free(umapdb);
			kmem_free(pagep->upg_free,
			    PAGESIZE / sizeof (hermon_dbr_t) / 8);
			nextp = pagep->upg_link;
			kmem_free(pagep, sizeof (*pagep));
			pagep = nextp;
		}
		next = udbr->udbr_link;
		kmem_free(udbr, sizeof (*udbr));
		udbr = next;
	}
	mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
	mutex_exit(&state->hs_dbr_lock);
}
481 481
482 482 /*
483 483 * hermon_ah_alloc()
484 484 * Context: Can be called only from user or kernel context.
485 485 */
486 486 int
487 487 hermon_ah_alloc(hermon_state_t *state, hermon_pdhdl_t pd,
488 488 ibt_adds_vect_t *attr_p, hermon_ahhdl_t *ahhdl, uint_t sleepflag)
489 489 {
490 490 hermon_rsrc_t *rsrc;
491 491 hermon_hw_udav_t *udav;
492 492 hermon_ahhdl_t ah;
493 493 int status;
494 494
495 495 /*
496 496 * Someday maybe the "ibt_adds_vect_t *attr_p" will be NULL to
497 497 * indicate that we wish to allocate an "invalid" (i.e. empty)
498 498 * address handle XXX
499 499 */
500 500
501 501 /* Validate that specified port number is legal */
502 502 if (!hermon_portnum_is_valid(state, attr_p->av_port_num)) {
503 503 return (IBT_HCA_PORT_INVALID);
504 504 }
↓ open down ↓ |
504 lines elided |
↑ open up ↑ |
505 505
506 506 /*
507 507 * Allocate the software structure for tracking the address handle
508 508 * (i.e. the Hermon Address Handle struct).
509 509 */
510 510 status = hermon_rsrc_alloc(state, HERMON_AHHDL, 1, sleepflag, &rsrc);
511 511 if (status != DDI_SUCCESS) {
512 512 return (IBT_INSUFF_RESOURCE);
513 513 }
514 514 ah = (hermon_ahhdl_t)rsrc->hr_addr;
515 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ah))
516 515
517 516 /* Increment the reference count on the protection domain (PD) */
518 517 hermon_pd_refcnt_inc(pd);
519 518
520 519 udav = (hermon_hw_udav_t *)kmem_zalloc(sizeof (hermon_hw_udav_t),
521 520 KM_SLEEP);
522 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*udav))
523 521
524 522 /*
525 523 * Fill in the UDAV data. We first zero out the UDAV, then populate
526 524 * it by then calling hermon_set_addr_path() to fill in the common
527 525 * portions that can be pulled from the "ibt_adds_vect_t" passed in
528 526 */
529 527 status = hermon_set_addr_path(state, attr_p,
530 528 (hermon_hw_addr_path_t *)udav, HERMON_ADDRPATH_UDAV);
531 529 if (status != DDI_SUCCESS) {
532 530 hermon_pd_refcnt_dec(pd);
533 531 hermon_rsrc_free(state, &rsrc);
534 532 return (status);
535 533 }
536 534 udav->pd = pd->pd_pdnum;
537 535 udav->sl = attr_p->av_srvl;
538 536
539 537 /*
540 538 * Fill in the rest of the Hermon Address Handle struct.
541 539 *
542 540 * NOTE: We are saving away a copy of the "av_dgid.gid_guid" field
543 541 * here because we may need to return it later to the IBTF (as a
544 542 * result of a subsequent query operation). Unlike the other UDAV
545 543 * parameters, the value of "av_dgid.gid_guid" is not always preserved.
546 544 * The reason for this is described in hermon_set_addr_path().
547 545 */
548 546 ah->ah_rsrcp = rsrc;
549 547 ah->ah_pdhdl = pd;
550 548 ah->ah_udav = udav;
551 549 ah->ah_save_guid = attr_p->av_dgid.gid_guid;
552 550 *ahhdl = ah;
553 551
554 552 return (DDI_SUCCESS);
555 553 }
556 554
557 555
558 556 /*
559 557 * hermon_ah_free()
560 558 * Context: Can be called only from user or kernel context.
561 559 */
562 560 /* ARGSUSED */
563 561 int
564 562 hermon_ah_free(hermon_state_t *state, hermon_ahhdl_t *ahhdl, uint_t sleepflag)
565 563 {
566 564 hermon_rsrc_t *rsrc;
567 565 hermon_pdhdl_t pd;
568 566 hermon_ahhdl_t ah;
569 567
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
570 568 /*
571 569 * Pull all the necessary information from the Hermon Address Handle
572 570 * struct. This is necessary here because the resource for the
573 571 * AH is going to be freed up as part of this operation.
574 572 */
575 573 ah = *ahhdl;
576 574 mutex_enter(&ah->ah_lock);
577 575 rsrc = ah->ah_rsrcp;
578 576 pd = ah->ah_pdhdl;
579 577 mutex_exit(&ah->ah_lock);
580 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ah))
581 578
582 579 /* Free the UDAV memory */
583 580 kmem_free(ah->ah_udav, sizeof (hermon_hw_udav_t));
584 581
585 582 /* Decrement the reference count on the protection domain (PD) */
586 583 hermon_pd_refcnt_dec(pd);
587 584
588 585 /* Free the Hermon Address Handle structure */
589 586 hermon_rsrc_free(state, &rsrc);
590 587
591 588 /* Set the ahhdl pointer to NULL and return success */
592 589 *ahhdl = NULL;
593 590
594 591 return (DDI_SUCCESS);
595 592 }
596 593
597 594
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
598 595 /*
599 596 * hermon_ah_query()
600 597 * Context: Can be called from interrupt or base context.
601 598 */
602 599 /* ARGSUSED */
603 600 int
604 601 hermon_ah_query(hermon_state_t *state, hermon_ahhdl_t ah, hermon_pdhdl_t *pd,
605 602 ibt_adds_vect_t *attr_p)
606 603 {
607 604 mutex_enter(&ah->ah_lock);
608 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
609 605
610 606 /*
611 607 * Pull the PD and UDAV from the Hermon Address Handle structure
612 608 */
613 609 *pd = ah->ah_pdhdl;
614 610
615 611 /*
616 612 * Fill in "ibt_adds_vect_t". We call hermon_get_addr_path() to fill
617 613 * the common portions that can be pulled from the UDAV we pass in.
618 614 *
619 615 * NOTE: We will also fill the "av_dgid.gid_guid" field from the
620 616 * "ah_save_guid" field we have previously saved away. The reason
621 617 * for this is described in hermon_ah_alloc() and hermon_ah_modify().
622 618 */
623 619 hermon_get_addr_path(state, (hermon_hw_addr_path_t *)ah->ah_udav,
624 620 attr_p, HERMON_ADDRPATH_UDAV);
625 621
626 622 attr_p->av_dgid.gid_guid = ah->ah_save_guid;
627 623
628 624 mutex_exit(&ah->ah_lock);
629 625 return (DDI_SUCCESS);
630 626 }
631 627
632 628
/*
 * hermon_ah_modify()
 *    Context: Can be called from interrupt or base context.
 *
 *    Update an existing Address Handle's UDAV in place from the caller's
 *    "ibt_adds_vect_t", preserving the reserved (non-modifiable) portions
 *    of the first two 64-bit words and the PD number.
 */
/* ARGSUSED */
int
hermon_ah_modify(hermon_state_t *state, hermon_ahhdl_t ah,
    ibt_adds_vect_t *attr_p)
{
	hermon_hw_udav_t old_udav;
	uint64_t data_old;
	int status, size, i;

	/* Validate that specified port number is legal */
	if (!hermon_portnum_is_valid(state, attr_p->av_port_num)) {
		return (IBT_HCA_PORT_INVALID);
	}

	mutex_enter(&ah->ah_lock);

	/* Save a copy of the current UDAV data in old_udav. */
	bcopy(ah->ah_udav, &old_udav, sizeof (hermon_hw_udav_t));

	/*
	 * Fill in the new UDAV with the caller's data, passed in via the
	 * "ibt_adds_vect_t" structure.
	 *
	 * NOTE: We also need to save away a copy of the "av_dgid.gid_guid"
	 * field here (just as we did during hermon_ah_alloc()) because we
	 * may need to return it later to the IBTF (as a result of a
	 * subsequent query operation). As explained in hermon_ah_alloc(),
	 * unlike the other UDAV parameters, the value of "av_dgid.gid_guid"
	 * is not always preserved. The reason for this is described in
	 * hermon_set_addr_path().
	 */
	status = hermon_set_addr_path(state, attr_p,
	    (hermon_hw_addr_path_t *)ah->ah_udav, HERMON_ADDRPATH_UDAV);
	if (status != DDI_SUCCESS) {
		mutex_exit(&ah->ah_lock);
		return (status);
	}
	ah->ah_save_guid = attr_p->av_dgid.gid_guid;
	ah->ah_udav->sl = attr_p->av_srvl;

	/*
	 * Copy changes into the new UDAV.
	 *    Note:  We copy in 64-bit chunks.  For the first two of these
	 *    chunks it is necessary to read the current contents of the
	 *    UDAV, mask off the modifiable portions (maintaining any
	 *    of the "reserved" portions), and then mask on the new data.
	 */
	size = sizeof (hermon_hw_udav_t) >> 3;
	for (i = 0; i < size; i++) {
		data_old = ((uint64_t *)&old_udav)[i];

		/*
		 * Apply mask to change only the relevant values.
		 */
		if (i == 0) {
			data_old = data_old & HERMON_UDAV_MODIFY_MASK0;
		} else if (i == 1) {
			data_old = data_old & HERMON_UDAV_MODIFY_MASK1;
		} else {
			/* remaining words are fully caller-supplied */
			data_old = 0;
		}

		/* Store the updated values to the UDAV */
		((uint64_t *)ah->ah_udav)[i] |= data_old;
	}

	/*
	 * Put the valid PD number back into the UDAV entry, as it
	 * might have been clobbered above.
	 */
	ah->ah_udav->pd = old_udav.pd;


	mutex_exit(&ah->ah_lock);
	return (DDI_SUCCESS);
}
714 709
715 710 /*
716 711 * hermon_mcg_attach()
717 712 * Context: Can be called only from user or kernel context.
718 713 */
719 714 int
720 715 hermon_mcg_attach(hermon_state_t *state, hermon_qphdl_t qp, ib_gid_t gid,
721 716 ib_lid_t lid)
722 717 {
723 718 hermon_rsrc_t *rsrc;
724 719 hermon_hw_mcg_t *mcg_entry;
725 720 hermon_hw_mcg_qp_list_t *mcg_entry_qplist;
726 721 hermon_mcghdl_t mcg, newmcg;
727 722 uint64_t mgid_hash;
728 723 uint32_t end_indx;
729 724 int status;
730 725 uint_t qp_found;
731 726
732 727 /*
733 728 * It is only allowed to attach MCG to UD queue pairs. Verify
734 729 * that the intended QP is of the appropriate transport type
735 730 */
736 731 if (qp->qp_serv_type != HERMON_QP_UD) {
737 732 return (IBT_QP_SRV_TYPE_INVALID);
738 733 }
739 734
740 735 /*
741 736 * Check for invalid Multicast DLID. Specifically, all Multicast
742 737 * LIDs should be within a well defined range. If the specified LID
743 738 * is outside of that range, then return an error.
744 739 */
745 740 if (hermon_mlid_is_valid(lid) == 0) {
746 741 return (IBT_MC_MLID_INVALID);
747 742 }
748 743 /*
749 744 * Check for invalid Multicast GID. All Multicast GIDs should have
750 745 * a well-defined pattern of bits and flags that are allowable. If
751 746 * the specified GID does not meet the criteria, then return an error.
752 747 */
753 748 if (hermon_mgid_is_valid(gid) == 0) {
754 749 return (IBT_MC_MGID_INVALID);
755 750 }
756 751
757 752 /*
758 753 * Compute the MGID hash value. Since the MCG table is arranged as
759 754 * a number of separate hash chains, this operation converts the
760 755 * specified MGID into the starting index of an entry in the hash
761 756 * table (i.e. the index for the start of the appropriate hash chain).
762 757 * Subsequent operations below will walk the chain searching for the
763 758 * right place to add this new QP.
764 759 */
765 760 status = hermon_mgid_hash_cmd_post(state, gid.gid_prefix, gid.gid_guid,
766 761 &mgid_hash, HERMON_SLEEPFLAG_FOR_CONTEXT());
767 762 if (status != HERMON_CMD_SUCCESS) {
768 763 cmn_err(CE_CONT, "Hermon: MGID_HASH command failed: %08x\n",
769 764 status);
770 765 if (status == HERMON_CMD_INVALID_STATUS) {
771 766 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
772 767 }
773 768 return (ibc_get_ci_failure(0));
774 769 }
775 770
776 771 /*
777 772 * Grab the multicast group mutex. Then grab the pre-allocated
778 773 * temporary buffer used for holding and/or modifying MCG entries.
779 774 * Zero out the temporary MCG entry before we begin.
780 775 */
781 776 mutex_enter(&state->hs_mcglock);
782 777 mcg_entry = state->hs_mcgtmp;
783 778 mcg_entry_qplist = HERMON_MCG_GET_QPLIST_PTR(mcg_entry);
784 779 bzero(mcg_entry, HERMON_MCGMEM_SZ(state));
785 780
786 781 /*
787 782 * Walk through the array of MCG entries starting at "mgid_hash".
788 783 * Try to find the appropriate place for this new QP to be added.
789 784 * This could happen when the first entry of the chain has MGID == 0
790 785 * (which means that the hash chain is empty), or because we find
791 786 * an entry with the same MGID (in which case we'll add the QP to
792 787 * that MCG), or because we come to the end of the chain (in which
793 788 * case this is the first QP being added to the multicast group that
794 789 * corresponds to the MGID. The hermon_mcg_walk_mgid_hash() routine
795 790 * walks the list and returns an index into the MCG table. The entry
796 791 * at this index is then checked to determine which case we have
797 792 * fallen into (see below). Note: We are using the "shadow" MCG
798 793 * list (of hermon_mcg_t structs) for this lookup because the real
799 794 * MCG entries are in hardware (and the lookup process would be much
800 795 * more time consuming).
801 796 */
802 797 end_indx = hermon_mcg_walk_mgid_hash(state, mgid_hash, gid, NULL);
803 798 mcg = &state->hs_mcghdl[end_indx];
804 799
805 800 /*
806 801 * If MGID == 0, then the hash chain is empty. Just fill in the
807 802 * current entry. Note: No need to allocate an MCG table entry
808 803 * as all the hash chain "heads" are already preallocated.
809 804 */
810 805 if ((mcg->mcg_mgid_h == 0) && (mcg->mcg_mgid_l == 0)) {
811 806
812 807 /* Fill in the current entry in the "shadow" MCG list */
813 808 hermon_mcg_setup_new_hdr(mcg, mcg_entry, gid, NULL);
814 809
815 810 /*
816 811 * Try to add the new QP number to the list. This (and the
817 812 * above) routine fills in a temporary MCG. The "mcg_entry"
818 813 * and "mcg_entry_qplist" pointers simply point to different
819 814 * offsets within the same temporary copy of the MCG (for
820 815 * convenience). Note: If this fails, we need to invalidate
821 816 * the entries we've already put into the "shadow" list entry
822 817 * above.
823 818 */
824 819 status = hermon_mcg_qplist_add(state, mcg, mcg_entry_qplist, qp,
825 820 &qp_found);
826 821 if (status != DDI_SUCCESS) {
827 822 bzero(mcg, sizeof (struct hermon_sw_mcg_list_s));
828 823 mutex_exit(&state->hs_mcglock);
829 824 return (status);
830 825 }
831 826 if (!qp_found)
832 827 mcg_entry->member_cnt = (mcg->mcg_num_qps + 1);
833 828 /* set the member count */
834 829
835 830 /*
836 831 * Once the temporary MCG has been filled in, write the entry
837 832 * into the appropriate location in the Hermon MCG entry table.
838 833 * If it's successful, then drop the lock and return success.
839 834 * Note: In general, this operation shouldn't fail. If it
840 835 * does, then it is an indication that something (probably in
841 836 * HW, but maybe in SW) has gone seriously wrong. We still
842 837 * want to zero out the entries that we've filled in above
843 838 * (in the hermon_mcg_setup_new_hdr() routine).
844 839 */
845 840 status = hermon_write_mgm_cmd_post(state, mcg_entry, end_indx,
846 841 HERMON_CMD_NOSLEEP_SPIN);
847 842 if (status != HERMON_CMD_SUCCESS) {
848 843 bzero(mcg, sizeof (struct hermon_sw_mcg_list_s));
849 844 mutex_exit(&state->hs_mcglock);
850 845 HERMON_WARNING(state, "failed to write MCG entry");
851 846 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: "
852 847 "%08x\n", status);
853 848 if (status == HERMON_CMD_INVALID_STATUS) {
854 849 hermon_fm_ereport(state, HCA_SYS_ERR,
855 850 HCA_ERR_SRV_LOST);
856 851 }
857 852 return (ibc_get_ci_failure(0));
858 853 }
859 854
860 855 /*
861 856 * Now that we know all the Hermon firmware accesses have been
862 857 * successful, we update the "shadow" MCG entry by incrementing
863 858 * the "number of attached QPs" count.
864 859 *
865 860 * We increment only if the QP is not already part of the
866 861 * MCG by checking the 'qp_found' flag returned from the
867 862 * qplist_add above.
868 863 */
869 864 if (!qp_found) {
870 865 mcg->mcg_num_qps++;
871 866
872 867 /*
873 868 * Increment the refcnt for this QP. Because the QP
874 869 * was added to this MCG, the refcnt must be
875 870 * incremented.
876 871 */
877 872 hermon_qp_mcg_refcnt_inc(qp);
878 873 }
879 874
880 875 /*
881 876 * We drop the lock and return success.
882 877 */
883 878 mutex_exit(&state->hs_mcglock);
884 879 return (DDI_SUCCESS);
885 880 }
886 881
887 882 /*
888 883 * If the specified MGID matches the MGID in the current entry, then
889 884 * we need to try to add the QP to the current MCG entry. In this
890 885 * case, it means that we need to read the existing MCG entry (into
891 886 * the temporary MCG), add the new QP number to the temporary entry
892 887 * (using the same method we used above), and write the entry back
893 888 * to the hardware (same as above).
894 889 */
895 890 if ((mcg->mcg_mgid_h == gid.gid_prefix) &&
896 891 (mcg->mcg_mgid_l == gid.gid_guid)) {
897 892
898 893 /*
899 894 * Read the current MCG entry into the temporary MCG. Note:
900 895 * In general, this operation shouldn't fail. If it does,
901 896 * then it is an indication that something (probably in HW,
902 897 * but maybe in SW) has gone seriously wrong.
903 898 */
904 899 status = hermon_read_mgm_cmd_post(state, mcg_entry, end_indx,
905 900 HERMON_CMD_NOSLEEP_SPIN);
906 901 if (status != HERMON_CMD_SUCCESS) {
907 902 mutex_exit(&state->hs_mcglock);
908 903 HERMON_WARNING(state, "failed to read MCG entry");
909 904 cmn_err(CE_CONT, "Hermon: READ_MGM command failed: "
910 905 "%08x\n", status);
911 906 if (status == HERMON_CMD_INVALID_STATUS) {
912 907 hermon_fm_ereport(state, HCA_SYS_ERR,
913 908 HCA_ERR_SRV_LOST);
914 909 }
915 910 return (ibc_get_ci_failure(0));
916 911 }
917 912
918 913 /*
919 914 * Try to add the new QP number to the list. This routine
920 915 * fills in the necessary pieces of the temporary MCG. The
921 916 * "mcg_entry_qplist" pointer is used to point to the portion
922 917 * of the temporary MCG that holds the QP numbers.
923 918 *
924 919 * Note: hermon_mcg_qplist_add() returns SUCCESS if it
925 920 * already found the QP in the list. In this case, the QP is
926 921 * not added on to the list again. Check the flag 'qp_found'
927 922 * if this value is needed to be known.
928 923 *
929 924 */
930 925 status = hermon_mcg_qplist_add(state, mcg, mcg_entry_qplist, qp,
931 926 &qp_found);
932 927 if (status != DDI_SUCCESS) {
933 928 mutex_exit(&state->hs_mcglock);
934 929 return (status);
935 930 }
936 931 if (!qp_found)
937 932 mcg_entry->member_cnt = (mcg->mcg_num_qps + 1);
938 933 /* set the member count */
939 934
940 935 /*
941 936 * Once the temporary MCG has been updated, write the entry
942 937 * into the appropriate location in the Hermon MCG entry table.
943 938 * If it's successful, then drop the lock and return success.
944 939 * Note: In general, this operation shouldn't fail. If it
945 940 * does, then it is an indication that something (probably in
946 941 * HW, but maybe in SW) has gone seriously wrong.
947 942 */
948 943 status = hermon_write_mgm_cmd_post(state, mcg_entry, end_indx,
949 944 HERMON_CMD_NOSLEEP_SPIN);
950 945 if (status != HERMON_CMD_SUCCESS) {
951 946 mutex_exit(&state->hs_mcglock);
952 947 HERMON_WARNING(state, "failed to write MCG entry");
953 948 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: "
954 949 "%08x\n", status);
955 950 if (status == HERMON_CMD_INVALID_STATUS) {
956 951 hermon_fm_ereport(state, HCA_SYS_ERR,
957 952 HCA_ERR_SRV_LOST);
958 953 }
959 954 return (ibc_get_ci_failure(0));
960 955 }
961 956
962 957 /*
963 958 * Now that we know all the Hermon firmware accesses have been
964 959 * successful, we update the current "shadow" MCG entry by
965 960 * incrementing the "number of attached QPs" count.
966 961 *
967 962 * We increment only if the QP is not already part of the
	 * MCG by checking the 'qp_found' flag returned from the
	 * qplist_add above.
970 965 */
971 966 if (!qp_found) {
972 967 mcg->mcg_num_qps++;
973 968
974 969 /*
975 970 * Increment the refcnt for this QP. Because the QP
976 971 * was added to this MCG, the refcnt must be
977 972 * incremented.
978 973 */
979 974 hermon_qp_mcg_refcnt_inc(qp);
980 975 }
981 976
982 977 /*
983 978 * We drop the lock and return success.
984 979 */
985 980 mutex_exit(&state->hs_mcglock);
986 981 return (DDI_SUCCESS);
987 982 }
988 983
989 984 /*
990 985 * If we've reached here, then we're at the end of the hash chain.
991 986 * We need to allocate a new MCG entry, fill it in, write it to Hermon,
992 987 * and update the previous entry to link the new one to the end of the
993 988 * chain.
994 989 */
995 990
996 991 /*
997 992 * Allocate an MCG table entry. This will be filled in with all
998 993 * the necessary parameters to define the multicast group. Then it
999 994 * will be written to the hardware in the next-to-last step below.
1000 995 */
1001 996 status = hermon_rsrc_alloc(state, HERMON_MCG, 1, HERMON_NOSLEEP, &rsrc);
1002 997 if (status != DDI_SUCCESS) {
1003 998 mutex_exit(&state->hs_mcglock);
1004 999 return (IBT_INSUFF_RESOURCE);
1005 1000 }
1006 1001
1007 1002 /*
1008 1003 * Fill in the new entry in the "shadow" MCG list. Note: Just as
1009 1004 * it does above, hermon_mcg_setup_new_hdr() also fills in a portion
1010 1005 * of the temporary MCG entry (the rest of which will be filled in by
1011 1006 * hermon_mcg_qplist_add() below)
1012 1007 */
1013 1008 newmcg = &state->hs_mcghdl[rsrc->hr_indx];
1014 1009 hermon_mcg_setup_new_hdr(newmcg, mcg_entry, gid, rsrc);
1015 1010
1016 1011 /*
1017 1012 * Try to add the new QP number to the list. This routine fills in
1018 1013 * the final necessary pieces of the temporary MCG. The
1019 1014 * "mcg_entry_qplist" pointer is used to point to the portion of the
1020 1015 * temporary MCG that holds the QP numbers. If we fail here, we
1021 1016 * must undo the previous resource allocation.
1022 1017 *
	 * Note: hermon_mcg_qplist_add() can return SUCCESS if it already
1024 1019 * found the QP in the list. In this case, the QP is not added on to
1025 1020 * the list again. Check the flag 'qp_found' if this value is needed
1026 1021 * to be known.
1027 1022 */
1028 1023 status = hermon_mcg_qplist_add(state, newmcg, mcg_entry_qplist, qp,
1029 1024 &qp_found);
1030 1025 if (status != DDI_SUCCESS) {
1031 1026 bzero(newmcg, sizeof (struct hermon_sw_mcg_list_s));
1032 1027 hermon_rsrc_free(state, &rsrc);
1033 1028 mutex_exit(&state->hs_mcglock);
1034 1029 return (status);
1035 1030 }
1036 1031 mcg_entry->member_cnt = (newmcg->mcg_num_qps + 1);
1037 1032 /* set the member count */
1038 1033
1039 1034 /*
1040 1035 * Once the temporary MCG has been updated, write the entry into the
1041 1036 * appropriate location in the Hermon MCG entry table. If this is
1042 1037 * successful, then we need to chain the previous entry to this one.
1043 1038 * Note: In general, this operation shouldn't fail. If it does, then
1044 1039 * it is an indication that something (probably in HW, but maybe in
1045 1040 * SW) has gone seriously wrong.
1046 1041 */
1047 1042 status = hermon_write_mgm_cmd_post(state, mcg_entry, rsrc->hr_indx,
1048 1043 HERMON_CMD_NOSLEEP_SPIN);
1049 1044 if (status != HERMON_CMD_SUCCESS) {
1050 1045 bzero(newmcg, sizeof (struct hermon_sw_mcg_list_s));
1051 1046 hermon_rsrc_free(state, &rsrc);
1052 1047 mutex_exit(&state->hs_mcglock);
1053 1048 HERMON_WARNING(state, "failed to write MCG entry");
1054 1049 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: %08x\n",
1055 1050 status);
1056 1051 if (status == HERMON_CMD_INVALID_STATUS) {
1057 1052 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1058 1053 }
1059 1054 return (ibc_get_ci_failure(0));
1060 1055 }
1061 1056
1062 1057 /*
1063 1058 * Now read the current MCG entry (the one previously at the end of
1064 1059 * hash chain) into the temporary MCG. We are going to update its
1065 1060 * "next_gid_indx" now and write the entry back to the MCG table.
1066 1061 * Note: In general, this operation shouldn't fail. If it does, then
1067 1062 * it is an indication that something (probably in HW, but maybe in SW)
1068 1063 * has gone seriously wrong. We will free up the MCG entry resource,
1069 1064 * but we will not undo the previously written MCG entry in the HW.
1070 1065 * This is OK, though, because the MCG entry is not currently attached
1071 1066 * to any hash chain.
1072 1067 */
1073 1068 status = hermon_read_mgm_cmd_post(state, mcg_entry, end_indx,
1074 1069 HERMON_CMD_NOSLEEP_SPIN);
1075 1070 if (status != HERMON_CMD_SUCCESS) {
1076 1071 bzero(newmcg, sizeof (struct hermon_sw_mcg_list_s));
1077 1072 hermon_rsrc_free(state, &rsrc);
1078 1073 mutex_exit(&state->hs_mcglock);
1079 1074 HERMON_WARNING(state, "failed to read MCG entry");
1080 1075 cmn_err(CE_CONT, "Hermon: READ_MGM command failed: %08x\n",
1081 1076 status);
1082 1077 if (status == HERMON_CMD_INVALID_STATUS) {
1083 1078 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1084 1079 }
1085 1080 return (ibc_get_ci_failure(0));
1086 1081 }
1087 1082
1088 1083 /*
1089 1084 * Finally, we update the "next_gid_indx" field in the temporary MCG
1090 1085 * and attempt to write the entry back into the Hermon MCG table. If
1091 1086 * this succeeds, then we update the "shadow" list to reflect the
1092 1087 * change, drop the lock, and return success. Note: In general, this
1093 1088 * operation shouldn't fail. If it does, then it is an indication
1094 1089 * that something (probably in HW, but maybe in SW) has gone seriously
1095 1090 * wrong. Just as we do above, we will free up the MCG entry resource,
1096 1091 * but we will not try to undo the previously written MCG entry. This
1097 1092 * is OK, though, because (since we failed here to update the end of
1098 1093 * the chain) that other entry is not currently attached to any chain.
1099 1094 */
1100 1095 mcg_entry->next_gid_indx = rsrc->hr_indx;
1101 1096 status = hermon_write_mgm_cmd_post(state, mcg_entry, end_indx,
1102 1097 HERMON_CMD_NOSLEEP_SPIN);
1103 1098 if (status != HERMON_CMD_SUCCESS) {
1104 1099 bzero(newmcg, sizeof (struct hermon_sw_mcg_list_s));
1105 1100 hermon_rsrc_free(state, &rsrc);
1106 1101 mutex_exit(&state->hs_mcglock);
1107 1102 HERMON_WARNING(state, "failed to write MCG entry");
1108 1103 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: %08x\n",
1109 1104 status);
1110 1105 if (status == HERMON_CMD_INVALID_STATUS) {
1111 1106 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1112 1107 }
1113 1108 return (ibc_get_ci_failure(0));
1114 1109 }
1115 1110 mcg = &state->hs_mcghdl[end_indx];
1116 1111 mcg->mcg_next_indx = rsrc->hr_indx;
1117 1112
1118 1113 /*
1119 1114 * Now that we know all the Hermon firmware accesses have been
1120 1115 * successful, we update the new "shadow" MCG entry by incrementing
1121 1116 * the "number of attached QPs" count. Then we drop the lock and
1122 1117 * return success.
1123 1118 */
1124 1119 newmcg->mcg_num_qps++;
1125 1120
1126 1121 /*
1127 1122 * Increment the refcnt for this QP. Because the QP
1128 1123 * was added to this MCG, the refcnt must be
1129 1124 * incremented.
1130 1125 */
1131 1126 hermon_qp_mcg_refcnt_inc(qp);
1132 1127
1133 1128 mutex_exit(&state->hs_mcglock);
1134 1129 return (DDI_SUCCESS);
1135 1130 }
1136 1131
1137 1132
/*
 * hermon_mcg_detach()
 *    Detach the given QP from the multicast group identified by "gid".
 *    Removes the QP number from the group's QP list in the hardware MCG
 *    table and, if the group becomes empty, unlinks the MCG entry from
 *    its hash chain.  Returns DDI_SUCCESS, an IBT_* error for invalid
 *    arguments, or an ibc_get_ci_failure() code on firmware failure.
 *    Context: Can be called only from user or kernel context.
 */
int
hermon_mcg_detach(hermon_state_t *state, hermon_qphdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
	hermon_hw_mcg_t		*mcg_entry;
	hermon_hw_mcg_qp_list_t	*mcg_entry_qplist;
	hermon_mcghdl_t		mcg;
	uint64_t		mgid_hash;
	uint32_t		end_indx, prev_indx;
	int			status;

	/*
	 * Check for invalid Multicast DLID.  Specifically, all Multicast
	 * LIDs should be within a well defined range.  If the specified LID
	 * is outside of that range, then return an error.
	 */
	if (hermon_mlid_is_valid(lid) == 0) {
		return (IBT_MC_MLID_INVALID);
	}

	/*
	 * Compute the MGID hash value.  As described above, the MCG table is
	 * arranged as a number of separate hash chains.  This operation
	 * converts the specified MGID into the starting index of an entry in
	 * the hash table (i.e. the index for the start of the appropriate
	 * hash chain).  Subsequent operations below will walk the chain
	 * searching for a matching entry from which to attempt to remove
	 * the specified QP.
	 */
	status = hermon_mgid_hash_cmd_post(state, gid.gid_prefix, gid.gid_guid,
	    &mgid_hash, HERMON_SLEEPFLAG_FOR_CONTEXT());
	if (status != HERMON_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Hermon: MGID_HASH command failed: %08x\n",
		    status);
		if (status == HERMON_CMD_INVALID_STATUS) {
			hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
		}
		return (ibc_get_ci_failure(0));
	}

	/*
	 * Grab the multicast group mutex.  Then grab the pre-allocated
	 * temporary buffer used for holding and/or modifying MCG entries.
	 * The mutex is held across all subsequent firmware accesses so the
	 * temporary buffer and the "shadow" list stay consistent.
	 */
	mutex_enter(&state->hs_mcglock);
	mcg_entry = state->hs_mcgtmp;
	mcg_entry_qplist = HERMON_MCG_GET_QPLIST_PTR(mcg_entry);

	/*
	 * Walk through the array of MCG entries starting at "mgid_hash".
	 * Try to find an MCG entry with a matching MGID.  The
	 * hermon_mcg_walk_mgid_hash() routine walks the list and returns an
	 * index into the MCG table (and the index of the previous entry,
	 * needed if the entry must be unlinked from the chain below).  The
	 * entry at this index is checked to determine whether it is a match
	 * or not.  If it is a match, then we continue on to attempt to
	 * remove the QP from the MCG.  If it is not a match (or not a valid
	 * MCG entry), then we return an error.
	 */
	end_indx = hermon_mcg_walk_mgid_hash(state, mgid_hash, gid, &prev_indx);
	mcg = &state->hs_mcghdl[end_indx];

	/*
	 * If MGID == 0 (the hash chain is empty) or if the specified MGID
	 * does not match the MGID in the current entry, then return
	 * IBT_MC_MGID_INVALID (to indicate that the specified MGID is not
	 * valid).
	 */
	if (((mcg->mcg_mgid_h == 0) && (mcg->mcg_mgid_l == 0)) ||
	    ((mcg->mcg_mgid_h != gid.gid_prefix) ||
	    (mcg->mcg_mgid_l != gid.gid_guid))) {
		mutex_exit(&state->hs_mcglock);
		return (IBT_MC_MGID_INVALID);
	}

	/*
	 * Read the current MCG entry into the temporary MCG.  Note: In
	 * general, this operation shouldn't fail.  If it does, then it is
	 * an indication that something (probably in HW, but maybe in SW)
	 * has gone seriously wrong.
	 */
	status = hermon_read_mgm_cmd_post(state, mcg_entry, end_indx,
	    HERMON_CMD_NOSLEEP_SPIN);
	if (status != HERMON_CMD_SUCCESS) {
		mutex_exit(&state->hs_mcglock);
		HERMON_WARNING(state, "failed to read MCG entry");
		cmn_err(CE_CONT, "Hermon: READ_MGM command failed: %08x\n",
		    status);
		if (status == HERMON_CMD_INVALID_STATUS) {
			hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
		}
		return (ibc_get_ci_failure(0));
	}

	/*
	 * Search the QP number list for a match.  If a match is found, then
	 * remove the entry from the QP list.  Otherwise, if no match is found,
	 * return an error.
	 */
	status = hermon_mcg_qplist_remove(mcg, mcg_entry_qplist, qp);
	if (status != DDI_SUCCESS) {
		mutex_exit(&state->hs_mcglock);
		return (status);
	}

	/*
	 * Decrement the MCG count for this QP.  When the 'qp_mcg'
	 * field becomes 0, then this QP is no longer a member of any
	 * MCG.
	 */
	hermon_qp_mcg_refcnt_dec(qp);

	/*
	 * If the current MCG's QP number list is about to be made empty
	 * ("mcg_num_qps" == 1), then remove the entry itself from the hash
	 * chain.  Otherwise, just write the updated MCG entry back to the
	 * hardware.  In either case, once we successfully update the hardware
	 * chain, then we decrement the "shadow" list entry's "mcg_num_qps"
	 * count (or zero out the entire "shadow" list entry) before returning
	 * success.  Note: Zeroing out the "shadow" list entry is done
	 * inside of hermon_mcg_hash_list_remove().
	 */
	if (mcg->mcg_num_qps == 1) {

		/* Remove an MCG entry from the hash chain */
		status = hermon_mcg_hash_list_remove(state, end_indx, prev_indx,
		    mcg_entry);
		if (status != DDI_SUCCESS) {
			mutex_exit(&state->hs_mcglock);
			return (status);
		}

	} else {
		/*
		 * Write the updated MCG entry back to the Hermon MCG table.
		 * If this succeeds, then we update the "shadow" list to
		 * reflect the change (i.e. decrement the "mcg_num_qps"),
		 * drop the lock, and return success.  Note: In general,
		 * this operation shouldn't fail.  If it does, then it is an
		 * indication that something (probably in HW, but maybe in SW)
		 * has gone seriously wrong.
		 */
		mcg_entry->member_cnt = (mcg->mcg_num_qps - 1);
		status = hermon_write_mgm_cmd_post(state, mcg_entry, end_indx,
		    HERMON_CMD_NOSLEEP_SPIN);
		if (status != HERMON_CMD_SUCCESS) {
			mutex_exit(&state->hs_mcglock);
			HERMON_WARNING(state, "failed to write MCG entry");
			cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: "
			    "%08x\n", status);
			if (status == HERMON_CMD_INVALID_STATUS) {
				hermon_fm_ereport(state, HCA_SYS_ERR,
				    HCA_ERR_SRV_LOST);
			}
			return (ibc_get_ci_failure(0));
		}
		mcg->mcg_num_qps--;
	}

	mutex_exit(&state->hs_mcglock);
	return (DDI_SUCCESS);
}
1302 1297
1303 1298 /*
1304 1299 * hermon_qp_mcg_refcnt_inc()
1305 1300 * Context: Can be called from interrupt or base context.
1306 1301 */
1307 1302 static void
1308 1303 hermon_qp_mcg_refcnt_inc(hermon_qphdl_t qp)
1309 1304 {
1310 1305 /* Increment the QP's MCG reference count */
1311 1306 mutex_enter(&qp->qp_lock);
1312 1307 qp->qp_mcg_refcnt++;
1313 1308 mutex_exit(&qp->qp_lock);
1314 1309 }
1315 1310
1316 1311
1317 1312 /*
1318 1313 * hermon_qp_mcg_refcnt_dec()
1319 1314 * Context: Can be called from interrupt or base context.
1320 1315 */
1321 1316 static void
1322 1317 hermon_qp_mcg_refcnt_dec(hermon_qphdl_t qp)
1323 1318 {
1324 1319 /* Decrement the QP's MCG reference count */
1325 1320 mutex_enter(&qp->qp_lock);
1326 1321 qp->qp_mcg_refcnt--;
1327 1322 mutex_exit(&qp->qp_lock);
1328 1323 }
1329 1324
1330 1325
1331 1326 /*
1332 1327 * hermon_mcg_qplist_add()
1333 1328 * Context: Can be called from interrupt or base context.
1334 1329 */
1335 1330 static int
1336 1331 hermon_mcg_qplist_add(hermon_state_t *state, hermon_mcghdl_t mcg,
1337 1332 hermon_hw_mcg_qp_list_t *mcg_qplist, hermon_qphdl_t qp,
1338 1333 uint_t *qp_found)
1339 1334 {
1340 1335 uint_t qplist_indx;
1341 1336
1342 1337 ASSERT(MUTEX_HELD(&state->hs_mcglock));
1343 1338
1344 1339 qplist_indx = mcg->mcg_num_qps;
1345 1340
1346 1341 /*
1347 1342 * Determine if we have exceeded the maximum number of QP per
1348 1343 * multicast group. If we have, then return an error
1349 1344 */
1350 1345 if (qplist_indx >= state->hs_cfg_profile->cp_num_qp_per_mcg) {
1351 1346 return (IBT_HCA_MCG_QP_EXCEEDED);
1352 1347 }
1353 1348
1354 1349 /*
1355 1350 * Determine if the QP is already attached to this MCG table. If it
1356 1351 * is, then we break out and treat this operation as a NO-OP
1357 1352 */
1358 1353 for (qplist_indx = 0; qplist_indx < mcg->mcg_num_qps;
1359 1354 qplist_indx++) {
1360 1355 if (mcg_qplist[qplist_indx].qpn == qp->qp_qpnum) {
1361 1356 break;
1362 1357 }
1363 1358 }
1364 1359
1365 1360 /*
1366 1361 * If the QP was already on the list, set 'qp_found' to TRUE. We still
1367 1362 * return SUCCESS in this case, but the qplist will not have been
1368 1363 * updated because the QP was already on the list.
1369 1364 */
1370 1365 if (qplist_indx < mcg->mcg_num_qps) {
1371 1366 *qp_found = 1;
1372 1367 } else {
1373 1368 /*
1374 1369 * Otherwise, append the new QP number to the end of the
1375 1370 * current QP list. Note: We will increment the "mcg_num_qps"
1376 1371 * field on the "shadow" MCG list entry later (after we know
1377 1372 * that all necessary Hermon firmware accesses have been
1378 1373 * successful).
1379 1374 *
1380 1375 * Set 'qp_found' to 0 so we know the QP was added on to the
1381 1376 * list for sure.
1382 1377 */
1383 1378 mcg_qplist[qplist_indx].qpn =
1384 1379 (qp->qp_qpnum | HERMON_MCG_QPN_BLOCK_LB);
1385 1380 *qp_found = 0;
1386 1381 }
1387 1382
1388 1383 return (DDI_SUCCESS);
1389 1384 }
1390 1385
1391 1386
1392 1387
1393 1388 /*
1394 1389 * hermon_mcg_qplist_remove()
1395 1390 * Context: Can be called from interrupt or base context.
1396 1391 */
1397 1392 static int
1398 1393 hermon_mcg_qplist_remove(hermon_mcghdl_t mcg,
1399 1394 hermon_hw_mcg_qp_list_t *mcg_qplist, hermon_qphdl_t qp)
1400 1395 {
1401 1396 uint_t i, qplist_indx;
1402 1397
1403 1398 /*
1404 1399 * Search the MCG QP list for a matching QPN. When
1405 1400 * it's found, we swap the last entry with the current
1406 1401 * one, set the last entry to zero, decrement the last
1407 1402 * entry, and return. If it's not found, then it's
1408 1403 * and error.
1409 1404 */
1410 1405 qplist_indx = mcg->mcg_num_qps;
1411 1406 for (i = 0; i < qplist_indx; i++) {
1412 1407 if (mcg_qplist[i].qpn == qp->qp_qpnum) {
1413 1408 mcg_qplist[i] = mcg_qplist[qplist_indx - 1];
1414 1409 mcg_qplist[qplist_indx - 1].qpn = 0;
1415 1410
1416 1411 return (DDI_SUCCESS);
1417 1412 }
1418 1413 }
1419 1414
1420 1415 return (IBT_QP_HDL_INVALID);
1421 1416 }
1422 1417
1423 1418
1424 1419 /*
1425 1420 * hermon_mcg_walk_mgid_hash()
1426 1421 * Context: Can be called from interrupt or base context.
1427 1422 */
1428 1423 static uint_t
1429 1424 hermon_mcg_walk_mgid_hash(hermon_state_t *state, uint64_t start_indx,
1430 1425 ib_gid_t mgid, uint_t *p_indx)
1431 1426 {
1432 1427 hermon_mcghdl_t curr_mcghdl;
1433 1428 uint_t curr_indx, prev_indx;
1434 1429
1435 1430 ASSERT(MUTEX_HELD(&state->hs_mcglock));
1436 1431
1437 1432 /* Start at the head of the hash chain */
1438 1433 curr_indx = (uint_t)start_indx;
1439 1434 prev_indx = curr_indx;
1440 1435 curr_mcghdl = &state->hs_mcghdl[curr_indx];
1441 1436
1442 1437 /* If the first entry in the chain has MGID == 0, then stop */
1443 1438 if ((curr_mcghdl->mcg_mgid_h == 0) &&
1444 1439 (curr_mcghdl->mcg_mgid_l == 0)) {
1445 1440 goto end_mgid_hash_walk;
1446 1441 }
1447 1442
1448 1443 /* If the first entry in the chain matches the MGID, then stop */
1449 1444 if ((curr_mcghdl->mcg_mgid_h == mgid.gid_prefix) &&
1450 1445 (curr_mcghdl->mcg_mgid_l == mgid.gid_guid)) {
1451 1446 goto end_mgid_hash_walk;
1452 1447 }
1453 1448
1454 1449 /* Otherwise, walk the hash chain looking for a match */
1455 1450 while (curr_mcghdl->mcg_next_indx != 0) {
1456 1451 prev_indx = curr_indx;
1457 1452 curr_indx = curr_mcghdl->mcg_next_indx;
1458 1453 curr_mcghdl = &state->hs_mcghdl[curr_indx];
1459 1454
1460 1455 if ((curr_mcghdl->mcg_mgid_h == mgid.gid_prefix) &&
1461 1456 (curr_mcghdl->mcg_mgid_l == mgid.gid_guid)) {
1462 1457 break;
1463 1458 }
1464 1459 }
1465 1460
1466 1461 end_mgid_hash_walk:
1467 1462 /*
1468 1463 * If necessary, return the index of the previous entry too. This
1469 1464 * is primarily used for detaching a QP from a multicast group. It
1470 1465 * may be necessary, in that case, to delete an MCG entry from the
1471 1466 * hash chain and having the index of the previous entry is helpful.
1472 1467 */
1473 1468 if (p_indx != NULL) {
1474 1469 *p_indx = prev_indx;
1475 1470 }
1476 1471 return (curr_indx);
1477 1472 }
1478 1473
1479 1474
1480 1475 /*
1481 1476 * hermon_mcg_setup_new_hdr()
1482 1477 * Context: Can be called from interrupt or base context.
1483 1478 */
1484 1479 static void
1485 1480 hermon_mcg_setup_new_hdr(hermon_mcghdl_t mcg, hermon_hw_mcg_t *mcg_hdr,
1486 1481 ib_gid_t mgid, hermon_rsrc_t *mcg_rsrc)
1487 1482 {
1488 1483 /*
1489 1484 * Fill in the fields of the "shadow" entry used by software
1490 1485 * to track MCG hardware entry
1491 1486 */
1492 1487 mcg->mcg_mgid_h = mgid.gid_prefix;
1493 1488 mcg->mcg_mgid_l = mgid.gid_guid;
1494 1489 mcg->mcg_rsrcp = mcg_rsrc;
1495 1490 mcg->mcg_next_indx = 0;
1496 1491 mcg->mcg_num_qps = 0;
1497 1492
1498 1493 /*
1499 1494 * Fill the header fields of the MCG entry (in the temporary copy)
1500 1495 */
1501 1496 mcg_hdr->mgid_h = mgid.gid_prefix;
1502 1497 mcg_hdr->mgid_l = mgid.gid_guid;
1503 1498 mcg_hdr->next_gid_indx = 0;
1504 1499 }
1505 1500
1506 1501
1507 1502 /*
1508 1503 * hermon_mcg_hash_list_remove()
1509 1504 * Context: Can be called only from user or kernel context.
1510 1505 */
1511 1506 static int
1512 1507 hermon_mcg_hash_list_remove(hermon_state_t *state, uint_t curr_indx,
1513 1508 uint_t prev_indx, hermon_hw_mcg_t *mcg_entry)
1514 1509 {
1515 1510 hermon_mcghdl_t curr_mcg, prev_mcg, next_mcg;
1516 1511 uint_t next_indx;
1517 1512 int status;
1518 1513
1519 1514 /* Get the pointer to "shadow" list for current entry */
1520 1515 curr_mcg = &state->hs_mcghdl[curr_indx];
1521 1516
1522 1517 /*
1523 1518 * If this is the first entry on a hash chain, then attempt to replace
1524 1519 * the entry with the next entry on the chain. If there are no
1525 1520 * subsequent entries on the chain, then this is the only entry and
1526 1521 * should be invalidated.
1527 1522 */
1528 1523 if (curr_indx == prev_indx) {
1529 1524
1530 1525 /*
1531 1526 * If this is the only entry on the chain, then invalidate it.
1532 1527 * Note: Invalidating an MCG entry means writing all zeros
1533 1528 * to the entry. This is only necessary for those MCG
1534 1529 * entries that are the "head" entries of the individual hash
1535 1530 * chains. Regardless of whether this operation returns
1536 1531 * success or failure, return that result to the caller.
1537 1532 */
1538 1533 next_indx = curr_mcg->mcg_next_indx;
1539 1534 if (next_indx == 0) {
1540 1535 status = hermon_mcg_entry_invalidate(state, mcg_entry,
1541 1536 curr_indx);
1542 1537 bzero(curr_mcg, sizeof (struct hermon_sw_mcg_list_s));
1543 1538 return (status);
1544 1539 }
1545 1540
1546 1541 /*
1547 1542 * Otherwise, this is just the first entry on the chain, so
1548 1543 * grab the next one
1549 1544 */
1550 1545 next_mcg = &state->hs_mcghdl[next_indx];
1551 1546
1552 1547 /*
1553 1548 * Read the next MCG entry into the temporary MCG. Note:
1554 1549 * In general, this operation shouldn't fail. If it does,
1555 1550 * then it is an indication that something (probably in HW,
1556 1551 * but maybe in SW) has gone seriously wrong.
1557 1552 */
1558 1553 status = hermon_read_mgm_cmd_post(state, mcg_entry, next_indx,
1559 1554 HERMON_CMD_NOSLEEP_SPIN);
1560 1555 if (status != HERMON_CMD_SUCCESS) {
1561 1556 HERMON_WARNING(state, "failed to read MCG entry");
1562 1557 cmn_err(CE_CONT, "Hermon: READ_MGM command failed: "
1563 1558 "%08x\n", status);
1564 1559 if (status == HERMON_CMD_INVALID_STATUS) {
1565 1560 hermon_fm_ereport(state, HCA_SYS_ERR,
1566 1561 HCA_ERR_SRV_LOST);
1567 1562 }
1568 1563 return (ibc_get_ci_failure(0));
1569 1564 }
1570 1565
1571 1566 /*
1572 1567 * Copy/Write the temporary MCG back to the hardware MCG list
1573 1568 * using the current index. This essentially removes the
1574 1569 * current MCG entry from the list by writing over it with
1575 1570 * the next one. If this is successful, then we can do the
1576 1571 * same operation for the "shadow" list. And we can also
1577 1572 * free up the Hermon MCG entry resource that was associated
1578 1573 * with the (old) next entry. Note: In general, this
1579 1574 * operation shouldn't fail. If it does, then it is an
1580 1575 * indication that something (probably in HW, but maybe in SW)
1581 1576 * has gone seriously wrong.
1582 1577 */
1583 1578 status = hermon_write_mgm_cmd_post(state, mcg_entry, curr_indx,
1584 1579 HERMON_CMD_NOSLEEP_SPIN);
1585 1580 if (status != HERMON_CMD_SUCCESS) {
1586 1581 HERMON_WARNING(state, "failed to write MCG entry");
1587 1582 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: "
1588 1583 "%08x\n", status);
1589 1584 if (status == HERMON_CMD_INVALID_STATUS) {
1590 1585 hermon_fm_ereport(state, HCA_SYS_ERR,
1591 1586 HCA_ERR_SRV_LOST);
1592 1587 }
1593 1588 return (ibc_get_ci_failure(0));
1594 1589 }
1595 1590
1596 1591 /*
1597 1592 * Copy all the software tracking information from the next
1598 1593 * entry on the "shadow" MCG list into the current entry on
1599 1594 * the list. Then invalidate (zero out) the other "shadow"
1600 1595 * list entry.
1601 1596 */
1602 1597 bcopy(next_mcg, curr_mcg, sizeof (struct hermon_sw_mcg_list_s));
1603 1598 bzero(next_mcg, sizeof (struct hermon_sw_mcg_list_s));
1604 1599
1605 1600 /*
1606 1601 * Free up the Hermon MCG entry resource used by the "next"
1607 1602 * MCG entry. That resource is no longer needed by any
1608 1603 * MCG entry which is first on a hash chain (like the "next"
1609 1604 * entry has just become).
1610 1605 */
1611 1606 hermon_rsrc_free(state, &curr_mcg->mcg_rsrcp);
1612 1607
1613 1608 return (DDI_SUCCESS);
1614 1609 }
1615 1610
1616 1611 /*
1617 1612 * Else if this is the last entry on the hash chain (or a middle
1618 1613 * entry, then we update the previous entry's "next_gid_index" field
1619 1614 * to make it point instead to the next entry on the chain. By
1620 1615 * skipping over the removed entry in this way, we can then free up
1621 1616 * any resources associated with the current entry. Note: We don't
1622 1617 * need to invalidate the "skipped over" hardware entry because it
1623 1618 * will no be longer connected to any hash chains, and if/when it is
1624 1619 * finally re-used, it will be written with entirely new values.
1625 1620 */
1626 1621
1627 1622 /*
1628 1623 * Read the next MCG entry into the temporary MCG. Note: In general,
1629 1624 * this operation shouldn't fail. If it does, then it is an
1630 1625 * indication that something (probably in HW, but maybe in SW) has
1631 1626 * gone seriously wrong.
1632 1627 */
1633 1628 status = hermon_read_mgm_cmd_post(state, mcg_entry, prev_indx,
1634 1629 HERMON_CMD_NOSLEEP_SPIN);
1635 1630 if (status != HERMON_CMD_SUCCESS) {
1636 1631 HERMON_WARNING(state, "failed to read MCG entry");
1637 1632 cmn_err(CE_CONT, "Hermon: READ_MGM command failed: %08x\n",
1638 1633 status);
1639 1634 if (status == HERMON_CMD_INVALID_STATUS) {
1640 1635 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1641 1636 }
1642 1637 return (ibc_get_ci_failure(0));
1643 1638 }
1644 1639
1645 1640 /*
1646 1641 * Finally, we update the "next_gid_indx" field in the temporary MCG
1647 1642 * and attempt to write the entry back into the Hermon MCG table. If
1648 1643 * this succeeds, then we update the "shadow" list to reflect the
1649 1644 * change, free up the Hermon MCG entry resource that was associated
1650 1645 * with the current entry, and return success. Note: In general,
1651 1646 * this operation shouldn't fail. If it does, then it is an indication
1652 1647 * that something (probably in HW, but maybe in SW) has gone seriously
1653 1648 * wrong.
1654 1649 */
1655 1650 mcg_entry->next_gid_indx = curr_mcg->mcg_next_indx;
1656 1651 status = hermon_write_mgm_cmd_post(state, mcg_entry, prev_indx,
1657 1652 HERMON_CMD_NOSLEEP_SPIN);
1658 1653 if (status != HERMON_CMD_SUCCESS) {
1659 1654 HERMON_WARNING(state, "failed to write MCG entry");
1660 1655 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: %08x\n",
1661 1656 status);
1662 1657 if (status == HERMON_CMD_INVALID_STATUS) {
1663 1658 hermon_fm_ereport(state, HCA_SYS_ERR,
1664 1659 HCA_ERR_SRV_LOST);
1665 1660 }
1666 1661 return (ibc_get_ci_failure(0));
1667 1662 }
1668 1663
1669 1664 /*
1670 1665 * Get the pointer to the "shadow" MCG list entry for the previous
1671 1666 * MCG. Update its "mcg_next_indx" to point to the next entry
1672 1667 * the one after the current entry. Note: This next index may be
1673 1668 * zero, indicating the end of the list.
1674 1669 */
1675 1670 prev_mcg = &state->hs_mcghdl[prev_indx];
1676 1671 prev_mcg->mcg_next_indx = curr_mcg->mcg_next_indx;
1677 1672
1678 1673 /*
1679 1674 * Free up the Hermon MCG entry resource used by the current entry.
1680 1675 * This resource is no longer needed because the chain now skips over
1681 1676 * the current entry. Then invalidate (zero out) the current "shadow"
1682 1677 * list entry.
1683 1678 */
1684 1679 hermon_rsrc_free(state, &curr_mcg->mcg_rsrcp);
1685 1680 bzero(curr_mcg, sizeof (struct hermon_sw_mcg_list_s));
1686 1681
1687 1682 return (DDI_SUCCESS);
1688 1683 }
1689 1684
1690 1685
1691 1686 /*
1692 1687 * hermon_mcg_entry_invalidate()
1693 1688 * Context: Can be called only from user or kernel context.
1694 1689 */
1695 1690 static int
1696 1691 hermon_mcg_entry_invalidate(hermon_state_t *state, hermon_hw_mcg_t *mcg_entry,
1697 1692 uint_t indx)
1698 1693 {
1699 1694 int status;
1700 1695
1701 1696 /*
1702 1697 * Invalidate the hardware MCG entry by zeroing out this temporary
1703 1698 * MCG and writing it the the hardware. Note: In general, this
1704 1699 * operation shouldn't fail. If it does, then it is an indication
1705 1700 * that something (probably in HW, but maybe in SW) has gone seriously
1706 1701 * wrong.
1707 1702 */
1708 1703 bzero(mcg_entry, HERMON_MCGMEM_SZ(state));
1709 1704 status = hermon_write_mgm_cmd_post(state, mcg_entry, indx,
1710 1705 HERMON_CMD_NOSLEEP_SPIN);
1711 1706 if (status != HERMON_CMD_SUCCESS) {
1712 1707 HERMON_WARNING(state, "failed to write MCG entry");
1713 1708 cmn_err(CE_CONT, "Hermon: WRITE_MGM command failed: %08x\n",
1714 1709 status);
1715 1710 if (status == HERMON_CMD_INVALID_STATUS) {
1716 1711 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1717 1712 }
1718 1713 return (ibc_get_ci_failure(0));
1719 1714 }
1720 1715
1721 1716 return (DDI_SUCCESS);
1722 1717 }
1723 1718
1724 1719
1725 1720 /*
1726 1721 * hermon_mgid_is_valid()
1727 1722 * Context: Can be called from interrupt or base context.
1728 1723 */
1729 1724 static int
1730 1725 hermon_mgid_is_valid(ib_gid_t gid)
1731 1726 {
1732 1727 uint_t topbits, flags, scope;
1733 1728
1734 1729 /*
1735 1730 * According to IBA 1.1 specification (section 4.1.1) a valid
1736 1731 * "multicast GID" must have its top eight bits set to all ones
1737 1732 */
1738 1733 topbits = (gid.gid_prefix >> HERMON_MCG_TOPBITS_SHIFT) &
1739 1734 HERMON_MCG_TOPBITS_MASK;
1740 1735 if (topbits != HERMON_MCG_TOPBITS) {
1741 1736 return (0);
1742 1737 }
1743 1738
1744 1739 /*
1745 1740 * The next 4 bits are the "flag" bits. These are valid only
1746 1741 * if they are "0" (which correspond to permanently assigned/
1747 1742 * "well-known" multicast GIDs) or "1" (for so-called "transient"
1748 1743 * multicast GIDs). All other values are reserved.
1749 1744 */
1750 1745 flags = (gid.gid_prefix >> HERMON_MCG_FLAGS_SHIFT) &
1751 1746 HERMON_MCG_FLAGS_MASK;
1752 1747 if (!((flags == HERMON_MCG_FLAGS_PERM) ||
1753 1748 (flags == HERMON_MCG_FLAGS_NONPERM))) {
1754 1749 return (0);
1755 1750 }
1756 1751
1757 1752 /*
1758 1753 * The next 4 bits are the "scope" bits. These are valid only
1759 1754 * if they are "2" (Link-local), "5" (Site-local), "8"
1760 1755 * (Organization-local) or "E" (Global). All other values
1761 1756 * are reserved (or currently unassigned).
1762 1757 */
1763 1758 scope = (gid.gid_prefix >> HERMON_MCG_SCOPE_SHIFT) &
1764 1759 HERMON_MCG_SCOPE_MASK;
1765 1760 if (!((scope == HERMON_MCG_SCOPE_LINKLOC) ||
1766 1761 (scope == HERMON_MCG_SCOPE_SITELOC) ||
1767 1762 (scope == HERMON_MCG_SCOPE_ORGLOC) ||
1768 1763 (scope == HERMON_MCG_SCOPE_GLOBAL))) {
1769 1764 return (0);
1770 1765 }
1771 1766
1772 1767 /*
1773 1768 * If it passes all of the above checks, then we will consider it
1774 1769 * a valid multicast GID.
1775 1770 */
1776 1771 return (1);
1777 1772 }
1778 1773
1779 1774
1780 1775 /*
1781 1776 * hermon_mlid_is_valid()
1782 1777 * Context: Can be called from interrupt or base context.
1783 1778 */
1784 1779 static int
1785 1780 hermon_mlid_is_valid(ib_lid_t lid)
1786 1781 {
1787 1782 /*
1788 1783 * According to IBA 1.1 specification (section 4.1.1) a valid
1789 1784 * "multicast DLID" must be between 0xC000 and 0xFFFE.
1790 1785 */
1791 1786 if ((lid < IB_LID_MC_FIRST) || (lid > IB_LID_MC_LAST)) {
1792 1787 return (0);
1793 1788 }
1794 1789
1795 1790 return (1);
1796 1791 }
1797 1792
1798 1793
1799 1794 /*
1800 1795 * hermon_pd_alloc()
1801 1796 * Context: Can be called only from user or kernel context.
1802 1797 */
1803 1798 int
1804 1799 hermon_pd_alloc(hermon_state_t *state, hermon_pdhdl_t *pdhdl, uint_t sleepflag)
1805 1800 {
1806 1801 hermon_rsrc_t *rsrc;
1807 1802 hermon_pdhdl_t pd;
1808 1803 int status;
1809 1804
1810 1805 /*
1811 1806 * Allocate the software structure for tracking the protection domain
↓ open down ↓ |
1126 lines elided |
↑ open up ↑ |
1812 1807 * (i.e. the Hermon Protection Domain handle). By default each PD
1813 1808 * structure will have a unique PD number assigned to it. All that
1814 1809 * is necessary is for software to initialize the PD reference count
1815 1810 * (to zero) and return success.
1816 1811 */
1817 1812 status = hermon_rsrc_alloc(state, HERMON_PDHDL, 1, sleepflag, &rsrc);
1818 1813 if (status != DDI_SUCCESS) {
1819 1814 return (IBT_INSUFF_RESOURCE);
1820 1815 }
1821 1816 pd = (hermon_pdhdl_t)rsrc->hr_addr;
1822 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pd))
1823 1817
1824 1818 pd->pd_refcnt = 0;
1825 1819 *pdhdl = pd;
1826 1820
1827 1821 return (DDI_SUCCESS);
1828 1822 }
1829 1823
1830 1824
1831 1825 /*
1832 1826 * hermon_pd_free()
1833 1827 * Context: Can be called only from user or kernel context.
1834 1828 */
1835 1829 int
1836 1830 hermon_pd_free(hermon_state_t *state, hermon_pdhdl_t *pdhdl)
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
1837 1831 {
1838 1832 hermon_rsrc_t *rsrc;
1839 1833 hermon_pdhdl_t pd;
1840 1834
1841 1835 /*
1842 1836 * Pull all the necessary information from the Hermon Protection Domain
1843 1837 * handle. This is necessary here because the resource for the
1844 1838 * PD is going to be freed up as part of this operation.
1845 1839 */
1846 1840 pd = *pdhdl;
1847 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pd))
1848 1841 rsrc = pd->pd_rsrcp;
1849 1842
1850 1843 /*
1851 1844 * Check the PD reference count. If the reference count is non-zero,
1852 1845 * then it means that this protection domain is still referenced by
1853 1846 * some memory region, queue pair, address handle, or other IB object
1854 1847 * If it is non-zero, then return an error. Otherwise, free the
1855 1848 * Hermon resource and return success.
1856 1849 */
1857 1850 if (pd->pd_refcnt != 0) {
1858 1851 return (IBT_PD_IN_USE);
1859 1852 }
1860 1853
1861 1854 /* Free the Hermon Protection Domain handle */
1862 1855 hermon_rsrc_free(state, &rsrc);
1863 1856
1864 1857 /* Set the pdhdl pointer to NULL and return success */
1865 1858 *pdhdl = (hermon_pdhdl_t)NULL;
1866 1859
1867 1860 return (DDI_SUCCESS);
1868 1861 }
1869 1862
1870 1863
1871 1864 /*
1872 1865 * hermon_pd_refcnt_inc()
1873 1866 * Context: Can be called from interrupt or base context.
1874 1867 */
1875 1868 void
1876 1869 hermon_pd_refcnt_inc(hermon_pdhdl_t pd)
1877 1870 {
1878 1871 /* Increment the protection domain's reference count */
1879 1872 atomic_inc_32(&pd->pd_refcnt);
1880 1873 }
1881 1874
1882 1875
1883 1876 /*
1884 1877 * hermon_pd_refcnt_dec()
1885 1878 * Context: Can be called from interrupt or base context.
1886 1879 */
1887 1880 void
1888 1881 hermon_pd_refcnt_dec(hermon_pdhdl_t pd)
1889 1882 {
1890 1883 /* Decrement the protection domain's reference count */
1891 1884 atomic_dec_32(&pd->pd_refcnt);
1892 1885 }
1893 1886
1894 1887
1895 1888 /*
1896 1889 * hermon_port_query()
1897 1890 * Context: Can be called only from user or kernel context.
1898 1891 */
int
hermon_port_query(hermon_state_t *state, uint_t port, ibt_hca_portinfo_t *pi)
{
	sm_portinfo_t		portinfo;
	sm_guidinfo_t		guidinfo;
	sm_pkey_table_t		pkeytable;
	ib_gid_t		*sgid;
	uint_t			sgid_max, pkey_max, tbl_size;
	int			i, j, indx, status;
	ib_pkey_t		*pkeyp;
	ib_guid_t		*guidp;

	/* Validate that specified port number is legal */
	if (!hermon_portnum_is_valid(state, port)) {
		return (IBT_HCA_PORT_INVALID);
	}

	/*
	 * Grab the per-port soft state copies of the PKey and GUID tables;
	 * both are refreshed from firmware data in the loops below.
	 */
	pkeyp = state->hs_pkey[port - 1];
	guidp = state->hs_guid[port - 1];

	/*
	 * We use the Hermon MAD_IFC command to post a GetPortInfo MAD
	 * to the firmware (for the specified port number).  This returns
	 * a full PortInfo MAD (in "portinfo") which we subsequently
	 * parse to fill in the "ibt_hca_portinfo_t" structure returned
	 * to the IBTF.
	 */
	status = hermon_getportinfo_cmd_post(state, port,
	    HERMON_SLEEPFLAG_FOR_CONTEXT(), &portinfo);
	if (status != HERMON_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Hermon: GetPortInfo (port %02d) command "
		    "failed: %08x\n", port, status);
		if (status == HERMON_CMD_INVALID_STATUS) {
			hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
		}
		return (ibc_get_ci_failure(0));
	}

	/*
	 * Parse the PortInfo MAD and fill in the IBTF structure
	 */
	pi->p_base_lid		= portinfo.LID;
	pi->p_qkey_violations	= portinfo.Q_KeyViolations;
	pi->p_pkey_violations	= portinfo.P_KeyViolations;
	pi->p_sm_sl		= portinfo.MasterSMSL;
	pi->p_sm_lid		= portinfo.MasterSMLID;
	pi->p_linkstate		= portinfo.PortState;
	pi->p_port_num		= portinfo.LocalPortNum;
	pi->p_phys_state	= portinfo.PortPhysicalState;
	pi->p_width_supported	= portinfo.LinkWidthSupported;
	pi->p_width_enabled	= portinfo.LinkWidthEnabled;
	pi->p_width_active	= portinfo.LinkWidthActive;
	pi->p_speed_supported	= portinfo.LinkSpeedSupported;
	pi->p_speed_enabled	= portinfo.LinkSpeedEnabled;
	pi->p_speed_active	= portinfo.LinkSpeedActive;
	pi->p_mtu		= portinfo.MTUCap;
	pi->p_lmc		= portinfo.LMC;
	pi->p_max_vl		= portinfo.VLCap;
	pi->p_subnet_timeout	= portinfo.SubnetTimeOut;
	pi->p_msg_sz		= ((uint32_t)1 << HERMON_QP_LOG_MAX_MSGSZ);
	/* Table sizes come from the configuration profile, not the MAD */
	tbl_size = state->hs_cfg_profile->cp_log_max_gidtbl;
	pi->p_sgid_tbl_sz = (1 << tbl_size);
	tbl_size = state->hs_cfg_profile->cp_log_max_pkeytbl;
	pi->p_pkey_tbl_sz = (1 << tbl_size);
	/* Record this port's GID prefix in the soft state */
	state->hs_sn_prefix[port - 1] = portinfo.GidPrefix;

	/*
	 * Convert InfiniBand-defined port capability flags to the format
	 * specified by the IBTF
	 */
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_SM)
		pi->p_capabilities |= IBT_PORT_CAP_SM;
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_SM_DISABLED)
		pi->p_capabilities |= IBT_PORT_CAP_SM_DISABLED;
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_SNMP_SUPPD)
		pi->p_capabilities |= IBT_PORT_CAP_SNMP_TUNNEL;
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD)
		pi->p_capabilities |= IBT_PORT_CAP_DM;
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_VM_SUPPD)
		pi->p_capabilities |= IBT_PORT_CAP_VENDOR;
	if (portinfo.CapabilityMask & SM_CAP_MASK_IS_CLNT_REREG_SUPPD)
		pi->p_capabilities |= IBT_PORT_CAP_CLNT_REREG;

	/*
	 * Fill in the SGID table.  Since the only access to the Hermon
	 * GID tables is through the firmware's MAD_IFC interface, we
	 * post as many GetGUIDInfo MADs as necessary to read in the entire
	 * contents of the SGID table (for the specified port).  Note:  The
	 * GetGUIDInfo command only gets eight GUIDs per operation.  These
	 * GUIDs are then appended to the GID prefix for the port (from the
	 * GetPortInfo above) to form the entire SGID table.
	 */
	for (i = 0; i < pi->p_sgid_tbl_sz; i += 8) {
		/* "i >> 3" converts the GUID index to an 8-entry block index */
		status = hermon_getguidinfo_cmd_post(state, port, i >> 3,
		    HERMON_SLEEPFLAG_FOR_CONTEXT(), &guidinfo);
		if (status != HERMON_CMD_SUCCESS) {
			cmn_err(CE_CONT, "Hermon: GetGUIDInfo (port %02d) "
			    "command failed: %08x\n", port, status);
			if (status == HERMON_CMD_INVALID_STATUS) {
				hermon_fm_ereport(state, HCA_SYS_ERR,
				    HCA_ERR_SRV_LOST);
			}
			return (ibc_get_ci_failure(0));
		}

		/* Figure out how many of the entries are valid */
		sgid_max = min((pi->p_sgid_tbl_sz - i), 8);
		for (j = 0; j < sgid_max; j++) {
			indx = (i + j);
			sgid = &pi->p_sgid_tbl[indx];
			sgid->gid_prefix = portinfo.GidPrefix;
			/* Update the soft state copy alongside the result */
			guidp[indx] = sgid->gid_guid =
			    guidinfo.GUIDBlocks[j];
		}
	}

	/*
	 * Fill in the PKey table.  Just as for the GID tables above, the
	 * only access to the Hermon PKey tables is through the firmware's
	 * MAD_IFC interface.  We post as many GetPKeyTable MADs as necessary
	 * to read in the entire contents of the PKey table (for the specified
	 * port).  Note:  The GetPKeyTable command only gets 32 PKeys per
	 * operation.
	 */
	for (i = 0; i < pi->p_pkey_tbl_sz; i += 32) {
		status = hermon_getpkeytable_cmd_post(state, port, i,
		    HERMON_SLEEPFLAG_FOR_CONTEXT(), &pkeytable);
		if (status != HERMON_CMD_SUCCESS) {
			cmn_err(CE_CONT, "Hermon: GetPKeyTable (port %02d) "
			    "command failed: %08x\n", port, status);
			if (status == HERMON_CMD_INVALID_STATUS) {
				hermon_fm_ereport(state, HCA_SYS_ERR,
				    HCA_ERR_SRV_LOST);
			}
			return (ibc_get_ci_failure(0));
		}

		/* Figure out how many of the entries are valid */
		pkey_max = min((pi->p_pkey_tbl_sz - i), 32);
		for (j = 0; j < pkey_max; j++) {
			indx = (i + j);
			/* Update the soft state copy alongside the result */
			pkeyp[indx] = pi->p_pkey_tbl[indx] =
			    pkeytable.P_KeyTableBlocks[j];
		}
	}

	return (DDI_SUCCESS);
}
2050 2039
2051 2040
2052 2041 /*
2053 2042 * hermon_port_modify()
2054 2043 * Context: Can be called only from user or kernel context.
2055 2044 */
2056 2045 /* ARGSUSED */
2057 2046 int
2058 2047 hermon_port_modify(hermon_state_t *state, uint8_t port,
2059 2048 ibt_port_modify_flags_t flags, uint8_t init_type)
2060 2049 {
2061 2050 sm_portinfo_t portinfo;
2062 2051 uint32_t capmask;
2063 2052 int status;
2064 2053 hermon_hw_set_port_t set_port;
2065 2054
2066 2055 /*
2067 2056 * Return an error if either of the unsupported flags are set
2068 2057 */
2069 2058 if ((flags & IBT_PORT_SHUTDOWN) ||
2070 2059 (flags & IBT_PORT_SET_INIT_TYPE)) {
2071 2060 return (IBT_NOT_SUPPORTED);
2072 2061 }
2073 2062
2074 2063 bzero(&set_port, sizeof (set_port));
2075 2064
2076 2065 /*
2077 2066 * Determine whether we are trying to reset the QKey counter
2078 2067 */
2079 2068 if (flags & IBT_PORT_RESET_QKEY)
2080 2069 set_port.rqk = 1;
2081 2070
2082 2071 /* Validate that specified port number is legal */
2083 2072 if (!hermon_portnum_is_valid(state, port)) {
2084 2073 return (IBT_HCA_PORT_INVALID);
2085 2074 }
2086 2075
2087 2076 /*
2088 2077 * Use the Hermon MAD_IFC command to post a GetPortInfo MAD to the
2089 2078 * firmware (for the specified port number). This returns a full
2090 2079 * PortInfo MAD (in "portinfo") from which we pull the current
2091 2080 * capability mask. We then modify the capability mask as directed
2092 2081 * by the "pmod_flags" field, and write the updated capability mask
2093 2082 * using the Hermon SET_IB command (below).
2094 2083 */
2095 2084 status = hermon_getportinfo_cmd_post(state, port,
2096 2085 HERMON_SLEEPFLAG_FOR_CONTEXT(), &portinfo);
2097 2086 if (status != HERMON_CMD_SUCCESS) {
2098 2087 if (status == HERMON_CMD_INVALID_STATUS) {
2099 2088 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
2100 2089 }
2101 2090 return (ibc_get_ci_failure(0));
2102 2091 }
2103 2092
2104 2093 /*
2105 2094 * Convert InfiniBand-defined port capability flags to the format
2106 2095 * specified by the IBTF. Specifically, we modify the capability
2107 2096 * mask based on the specified values.
2108 2097 */
2109 2098 capmask = portinfo.CapabilityMask;
2110 2099
2111 2100 if (flags & IBT_PORT_RESET_SM)
2112 2101 capmask &= ~SM_CAP_MASK_IS_SM;
2113 2102 else if (flags & IBT_PORT_SET_SM)
2114 2103 capmask |= SM_CAP_MASK_IS_SM;
2115 2104
2116 2105 if (flags & IBT_PORT_RESET_SNMP)
2117 2106 capmask &= ~SM_CAP_MASK_IS_SNMP_SUPPD;
2118 2107 else if (flags & IBT_PORT_SET_SNMP)
2119 2108 capmask |= SM_CAP_MASK_IS_SNMP_SUPPD;
2120 2109
2121 2110 if (flags & IBT_PORT_RESET_DEVMGT)
2122 2111 capmask &= ~SM_CAP_MASK_IS_DM_SUPPD;
2123 2112 else if (flags & IBT_PORT_SET_DEVMGT)
2124 2113 capmask |= SM_CAP_MASK_IS_DM_SUPPD;
2125 2114
2126 2115 if (flags & IBT_PORT_RESET_VENDOR)
2127 2116 capmask &= ~SM_CAP_MASK_IS_VM_SUPPD;
2128 2117 else if (flags & IBT_PORT_SET_VENDOR)
2129 2118 capmask |= SM_CAP_MASK_IS_VM_SUPPD;
2130 2119
2131 2120 set_port.cap_mask = capmask;
2132 2121
2133 2122 /*
2134 2123 * Use the Hermon SET_PORT command to update the capability mask and
2135 2124 * (possibly) reset the QKey violation counter for the specified port.
2136 2125 * Note: In general, this operation shouldn't fail. If it does, then
2137 2126 * it is an indication that something (probably in HW, but maybe in
2138 2127 * SW) has gone seriously wrong.
2139 2128 */
2140 2129 status = hermon_set_port_cmd_post(state, &set_port, port,
2141 2130 HERMON_SLEEPFLAG_FOR_CONTEXT());
2142 2131 if (status != HERMON_CMD_SUCCESS) {
2143 2132 HERMON_WARNING(state, "failed to modify port capabilities");
2144 2133 cmn_err(CE_CONT, "Hermon: SET_IB (port %02d) command failed: "
2145 2134 "%08x\n", port, status);
2146 2135 if (status == HERMON_CMD_INVALID_STATUS) {
2147 2136 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
2148 2137 }
2149 2138 return (ibc_get_ci_failure(0));
2150 2139 }
2151 2140
2152 2141 return (DDI_SUCCESS);
2153 2142 }
2154 2143
2155 2144
2156 2145 /*
2157 2146 * hermon_set_addr_path()
2158 2147 * Context: Can be called from interrupt or base context.
2159 2148 *
2160 2149 * Note: This routine is used for two purposes. It is used to fill in the
2161 2150 * Hermon UDAV fields, and it is used to fill in the address path information
2162 2151 * for QPs. Because the two Hermon structures are similar, common fields can
2163 2152 * be filled in here. Because they are different, however, we pass
2164 2153 * an additional flag to indicate which type is being filled and do each one
2165 2154 * uniquely
2166 2155 */
↓ open down ↓ |
145 lines elided |
↑ open up ↑ |
2167 2156
int hermon_srate_override = -1;	/* allows ease of testing */

int
hermon_set_addr_path(hermon_state_t *state, ibt_adds_vect_t *av,
    hermon_hw_addr_path_t *path, uint_t type)
{
	uint_t		gidtbl_sz;
	hermon_hw_udav_t *udav;

	/*
	 * Overlay the address path as a UDAV so that UDAV-only fields
	 * ("portnum", "sl") can be reached through the same pointer.
	 */
	udav = (hermon_hw_udav_t *)(void *)path;
	path->mlid = av->av_src_path;
	path->rlid = av->av_dlid;

	/* Map the IBTF static rate onto the hardware encoding */
	switch (av->av_srate) {
	case IBT_SRATE_2:	/* 1xSDR-2.5Gb/s injection rate */
		path->max_stat_rate = 7; break;
	case IBT_SRATE_10:	/* 4xSDR-10.0Gb/s injection rate */
		path->max_stat_rate = 8; break;
	case IBT_SRATE_30:	/* 12xSDR-30Gb/s injection rate */
		path->max_stat_rate = 9; break;
	case IBT_SRATE_5:	/* 1xDDR-5Gb/s injection rate */
		path->max_stat_rate = 10; break;
	case IBT_SRATE_20:	/* 4xDDR-20Gb/s injection rate */
		path->max_stat_rate = 11; break;
	case IBT_SRATE_40:	/* 4xQDR-40Gb/s injection rate */
		path->max_stat_rate = 12; break;
	case IBT_SRATE_60:	/* 12xDDR-60Gb/s injection rate */
		path->max_stat_rate = 13; break;
	case IBT_SRATE_80:	/* 8xQDR-80Gb/s injection rate */
		path->max_stat_rate = 14; break;
	case IBT_SRATE_120:	/* 12xQDR-120Gb/s injection rate */
		path->max_stat_rate = 15; break;
	case IBT_SRATE_NOT_SPECIFIED:	/* Max */
		path->max_stat_rate = 0; break;
	default:
		return (IBT_STATIC_RATE_INVALID);
	}
	if (hermon_srate_override != -1)	/* for evaluating HCA firmware */
		path->max_stat_rate = hermon_srate_override;

	/*
	 * If "grh" flag is set, then check for valid SGID index too.
	 * NOTE(review): valid indices would be 0..gidtbl_sz-1, yet this
	 * check permits av_sgid_ix == gidtbl_sz -- suspected off-by-one
	 * (">=" intended); confirm against the PRM and callers before
	 * changing.
	 */
	gidtbl_sz = (1 << state->hs_queryport.log_max_gid);
	if ((av->av_send_grh) && (av->av_sgid_ix > gidtbl_sz)) {
		return (IBT_SGID_INVALID);
	}

	/*
	 * Fill in all "global" values regardless of the value in the GRH
	 * flag.  Because "grh" is not set unless "av_send_grh" is set, the
	 * hardware will ignore the other "global" values as necessary.  Note:
	 * SW does this here to enable later query operations to return
	 * exactly the same params that were passed when the addr path was
	 * last written.
	 */
	path->grh = av->av_send_grh;
	if (type == HERMON_ADDRPATH_QP) {
		path->mgid_index = av->av_sgid_ix;
	} else {
		/*
		 * For Hermon UDAV, the "mgid_index" field is the index into
		 * a combined table (not a per-port table), but having sections
		 * for each port.  So some extra calculations are necessary.
		 */
		path->mgid_index = ((av->av_port_num - 1) * gidtbl_sz) +
		    av->av_sgid_ix;

		udav->portnum = av->av_port_num;
	}

	/*
	 * According to Hermon PRM, the (31:0) part of rgid_l must be set to
	 * "0x2" if the 'grh' or 'g' bit is cleared.  It also says that we
	 * only need to do it for UDAV's.  So we enforce that here.
	 *
	 * NOTE: The entire 64 bits worth of GUID info is actually being
	 * preserved (for UDAVs) by the callers of this function
	 * (hermon_ah_alloc() and hermon_ah_modify()) and as long as the
	 * 'grh' bit is not set, the upper 32 bits (63:32) of rgid_l are
	 * "don't care".
	 */
	if ((path->grh) || (type == HERMON_ADDRPATH_QP)) {
		path->flow_label = av->av_flow;
		path->tclass	 = av->av_tclass;
		path->hop_limit	 = av->av_hop;
		/* bcopy: the GID fields may not be naturally aligned here */
		bcopy(&(av->av_dgid.gid_prefix), &(path->rgid_h),
		    sizeof (uint64_t));
		bcopy(&(av->av_dgid.gid_guid), &(path->rgid_l),
		    sizeof (uint64_t));
	} else {
		path->rgid_l	 = 0x2;
		path->flow_label = 0;
		path->tclass	 = 0;
		path->hop_limit	 = 0;
		path->rgid_h	 = 0;
	}
	/*
	 * Extract the default service level.
	 * NOTE(review): this store goes through the UDAV overlay even for
	 * the HERMON_ADDRPATH_QP case -- confirm the "sl" bits land in the
	 * intended field of the QP address-path layout.
	 */
	udav->sl = (HERMON_DEF_SCHED_SELECTION & 0x3C) >> 2;

	return (DDI_SUCCESS);
}
2273 2258
2274 2259
2275 2260 /*
2276 2261 * hermon_get_addr_path()
2277 2262 * Context: Can be called from interrupt or base context.
2278 2263 *
2279 2264 * Note: Just like hermon_set_addr_path() above, this routine is used for two
2280 2265 * purposes. It is used to read in the Hermon UDAV fields, and it is used to
2281 2266 * read in the address path information for QPs. Because the two Hermon
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
2282 2267 * structures are similar, common fields can be read in here. But because
2283 2268 * they are slightly different, we pass an additional flag to indicate which
2284 2269 * type is being read.
2285 2270 */
2286 2271 void
2287 2272 hermon_get_addr_path(hermon_state_t *state, hermon_hw_addr_path_t *path,
2288 2273 ibt_adds_vect_t *av, uint_t type)
2289 2274 {
2290 2275 uint_t gidtbl_sz;
2291 2276
2292 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*path))
2293 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*av))
2294 -
2295 2277 av->av_src_path = path->mlid;
2296 2278 av->av_dlid = path->rlid;
2297 2279
2298 2280 /* Set "av_ipd" value from max_stat_rate */
2299 2281 switch (path->max_stat_rate) {
2300 2282 case 7: /* 1xSDR-2.5Gb/s injection rate */
2301 2283 av->av_srate = IBT_SRATE_2; break;
2302 2284 case 8: /* 4xSDR-10.0Gb/s injection rate */
2303 2285 av->av_srate = IBT_SRATE_10; break;
2304 2286 case 9: /* 12xSDR-30Gb/s injection rate */
2305 2287 av->av_srate = IBT_SRATE_30; break;
2306 2288 case 10: /* 1xDDR-5Gb/s injection rate */
2307 2289 av->av_srate = IBT_SRATE_5; break;
2308 2290 case 11: /* 4xDDR-20Gb/s injection rate */
2309 2291 av->av_srate = IBT_SRATE_20; break;
2310 2292 case 12: /* xQDR-40Gb/s injection rate */
2311 2293 av->av_srate = IBT_SRATE_40; break;
2312 2294 case 13: /* 12xDDR-60Gb/s injection rate */
2313 2295 av->av_srate = IBT_SRATE_60; break;
2314 2296 case 14: /* 8xQDR-80Gb/s injection rate */
2315 2297 av->av_srate = IBT_SRATE_80; break;
2316 2298 case 15: /* 12xQDR-120Gb/s injection rate */
2317 2299 av->av_srate = IBT_SRATE_120; break;
2318 2300 case 0: /* max */
2319 2301 av->av_srate = IBT_SRATE_NOT_SPECIFIED; break;
2320 2302 default: /* 1x injection rate */
2321 2303 av->av_srate = IBT_SRATE_1X;
2322 2304 }
2323 2305
2324 2306 /*
2325 2307 * Extract all "global" values regardless of the value in the GRH
2326 2308 * flag. Because "av_send_grh" is set only if "grh" is set, software
2327 2309 * knows to ignore the other "global" values as necessary. Note: SW
2328 2310 * does it this way to enable these query operations to return exactly
2329 2311 * the same params that were passed when the addr path was last written.
2330 2312 */
2331 2313 av->av_send_grh = path->grh;
2332 2314 if (type == HERMON_ADDRPATH_QP) {
2333 2315 av->av_sgid_ix = path->mgid_index;
2334 2316 } else {
2335 2317 /*
2336 2318 * For Hermon UDAV, the "mgid_index" field is the index into
2337 2319 * a combined table (not a per-port table).
2338 2320 */
2339 2321 gidtbl_sz = (1 << state->hs_queryport.log_max_gid);
2340 2322 av->av_sgid_ix = path->mgid_index - ((av->av_port_num - 1) *
2341 2323 gidtbl_sz);
2342 2324
2343 2325 av->av_port_num = ((hermon_hw_udav_t *)(void *)path)->portnum;
2344 2326 }
2345 2327 av->av_flow = path->flow_label;
2346 2328 av->av_tclass = path->tclass;
2347 2329 av->av_hop = path->hop_limit;
2348 2330 /* this is for alignment issue w/ the addr path struct in Hermon */
2349 2331 bcopy(&(path->rgid_h), &(av->av_dgid.gid_prefix), sizeof (uint64_t));
2350 2332 bcopy(&(path->rgid_l), &(av->av_dgid.gid_guid), sizeof (uint64_t));
2351 2333 }
2352 2334
2353 2335
2354 2336 /*
2355 2337 * hermon_portnum_is_valid()
2356 2338 * Context: Can be called from interrupt or base context.
2357 2339 */
2358 2340 int
2359 2341 hermon_portnum_is_valid(hermon_state_t *state, uint_t portnum)
2360 2342 {
2361 2343 uint_t max_port;
2362 2344
2363 2345 max_port = state->hs_cfg_profile->cp_num_ports;
2364 2346 if ((portnum <= max_port) && (portnum != 0)) {
2365 2347 return (1);
2366 2348 } else {
2367 2349 return (0);
2368 2350 }
2369 2351 }
2370 2352
2371 2353
2372 2354 /*
2373 2355 * hermon_pkeyindex_is_valid()
2374 2356 * Context: Can be called from interrupt or base context.
2375 2357 */
2376 2358 int
2377 2359 hermon_pkeyindex_is_valid(hermon_state_t *state, uint_t pkeyindx)
2378 2360 {
2379 2361 uint_t max_pkeyindx;
2380 2362
2381 2363 max_pkeyindx = 1 << state->hs_cfg_profile->cp_log_max_pkeytbl;
2382 2364 if (pkeyindx < max_pkeyindx) {
2383 2365 return (1);
2384 2366 } else {
2385 2367 return (0);
2386 2368 }
2387 2369 }
2388 2370
2389 2371
2390 2372 /*
2391 2373 * hermon_queue_alloc()
2392 2374 * Context: Can be called from interrupt or base context.
↓ open down ↓ |
88 lines elided |
↑ open up ↑ |
2393 2375 */
2394 2376 int
2395 2377 hermon_queue_alloc(hermon_state_t *state, hermon_qalloc_info_t *qa_info,
2396 2378 uint_t sleepflag)
2397 2379 {
2398 2380 ddi_dma_attr_t dma_attr;
2399 2381 int (*callback)(caddr_t);
2400 2382 uint64_t realsize, alloc_mask;
2401 2383 int flag, status;
2402 2384
2403 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*qa_info))
2404 -
2405 2385 /* Set the callback flag appropriately */
2406 2386 callback = (sleepflag == HERMON_SLEEP) ? DDI_DMA_SLEEP :
2407 2387 DDI_DMA_DONTWAIT;
2408 2388
2409 2389 /*
2410 2390 * Initialize many of the default DMA attributes. Then set additional
2411 2391 * alignment restrictions as necessary for the queue memory. Also
2412 2392 * respect the configured value for IOMMU bypass
2413 2393 */
2414 2394 hermon_dma_attr_init(state, &dma_attr);
2415 2395 dma_attr.dma_attr_align = qa_info->qa_bind_align;
2416 2396 #ifdef __sparc
2417 2397 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS) {
2418 2398 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2419 2399 }
2420 2400 #endif
2421 2401
2422 2402 /* Allocate a DMA handle */
2423 2403 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, callback, NULL,
2424 2404 &qa_info->qa_dmahdl);
2425 2405 if (status != DDI_SUCCESS) {
2426 2406 return (DDI_FAILURE);
2427 2407 }
2428 2408
2429 2409 /*
2430 2410 * Determine the amount of memory to allocate, depending on the values
2431 2411 * in "qa_bind_align" and "qa_alloc_align". The problem we are trying
2432 2412 * to solve here is that allocating a DMA handle with IOMMU bypass
2433 2413 * (DDI_DMA_FORCE_PHYSICAL) constrains us to only requesting alignments
2434 2414 * that are less restrictive than the page size. Since we may need
2435 2415 * stricter alignments on the memory allocated by ddi_dma_mem_alloc()
2436 2416 * (e.g. in Hermon QP work queue memory allocation), we use the
2437 2417 * following method to calculate how much additional memory to request,
2438 2418 * and we enforce our own alignment on the allocated result.
2439 2419 */
2440 2420 alloc_mask = qa_info->qa_alloc_align - 1;
2441 2421 if (qa_info->qa_bind_align == qa_info->qa_alloc_align) {
2442 2422 realsize = qa_info->qa_size;
2443 2423 } else {
2444 2424 realsize = qa_info->qa_size + alloc_mask;
2445 2425 }
2446 2426
2447 2427 /*
2448 2428 * If we are to allocate the queue from system memory, then use
2449 2429 * ddi_dma_mem_alloc() to find the space. Otherwise, this is a
2450 2430 * host memory allocation, use ddi_umem_alloc(). In either case,
2451 2431 * return a pointer to the memory range allocated (including any
2452 2432 * necessary alignment adjustments), the "real" memory pointer,
2453 2433 * the "real" size, and a ddi_acc_handle_t to use when reading
2454 2434 * from/writing to the memory.
2455 2435 */
2456 2436 if (qa_info->qa_location == HERMON_QUEUE_LOCATION_NORMAL) {
2457 2437 /* Allocate system memory for the queue */
2458 2438 status = ddi_dma_mem_alloc(qa_info->qa_dmahdl, realsize,
2459 2439 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2460 2440 (caddr_t *)&qa_info->qa_buf_real,
2461 2441 (size_t *)&qa_info->qa_buf_realsz, &qa_info->qa_acchdl);
2462 2442 if (status != DDI_SUCCESS) {
2463 2443 ddi_dma_free_handle(&qa_info->qa_dmahdl);
2464 2444 return (DDI_FAILURE);
2465 2445 }
2466 2446
2467 2447 /*
2468 2448 * Save temporary copy of the real pointer. (This may be
2469 2449 * modified in the last step below).
2470 2450 */
2471 2451 qa_info->qa_buf_aligned = qa_info->qa_buf_real;
2472 2452
2473 2453 bzero(qa_info->qa_buf_real, qa_info->qa_buf_realsz);
2474 2454
2475 2455 } else { /* HERMON_QUEUE_LOCATION_USERLAND */
2476 2456
2477 2457 /* Allocate userland mappable memory for the queue */
2478 2458 flag = (sleepflag == HERMON_SLEEP) ? DDI_UMEM_SLEEP :
2479 2459 DDI_UMEM_NOSLEEP;
2480 2460 qa_info->qa_buf_real = ddi_umem_alloc(realsize, flag,
2481 2461 &qa_info->qa_umemcookie);
2482 2462 if (qa_info->qa_buf_real == NULL) {
2483 2463 ddi_dma_free_handle(&qa_info->qa_dmahdl);
2484 2464 return (DDI_FAILURE);
2485 2465 }
2486 2466
2487 2467 /*
2488 2468 * Save temporary copy of the real pointer. (This may be
2489 2469 * modified in the last step below).
2490 2470 */
2491 2471 qa_info->qa_buf_aligned = qa_info->qa_buf_real;
2492 2472
2493 2473 }
2494 2474
2495 2475 /*
2496 2476 * The next to last step is to ensure that the final address
2497 2477 * ("qa_buf_aligned") has the appropriate "alloc" alignment
2498 2478 * restriction applied to it (if necessary).
2499 2479 */
2500 2480 if (qa_info->qa_bind_align != qa_info->qa_alloc_align) {
2501 2481 qa_info->qa_buf_aligned = (uint32_t *)(uintptr_t)(((uintptr_t)
2502 2482 qa_info->qa_buf_aligned + alloc_mask) & ~alloc_mask);
2503 2483 }
2504 2484 /*
2505 2485 * The last step is to figure out the offset of the start relative
2506 2486 * to the first page of the region - will be used in the eqc/cqc
2507 2487 * passed to the HW
2508 2488 */
2509 2489 qa_info->qa_pgoffs = (uint_t)((uintptr_t)
2510 2490 qa_info->qa_buf_aligned & HERMON_PAGEOFFSET);
2511 2491
2512 2492 return (DDI_SUCCESS);
↓ open down ↓ |
98 lines elided |
↑ open up ↑ |
2513 2493 }
2514 2494
2515 2495
2516 2496 /*
2517 2497 * hermon_queue_free()
2518 2498 * Context: Can be called from interrupt or base context.
2519 2499 */
2520 2500 void
2521 2501 hermon_queue_free(hermon_qalloc_info_t *qa_info)
2522 2502 {
2523 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*qa_info))
2524 -
2525 2503 /*
2526 2504 * Depending on how (i.e. from where) we allocated the memory for
2527 2505 * this queue, we choose the appropriate method for releasing the
2528 2506 * resources.
2529 2507 */
2530 2508 if (qa_info->qa_location == HERMON_QUEUE_LOCATION_NORMAL) {
2531 2509
2532 2510 ddi_dma_mem_free(&qa_info->qa_acchdl);
2533 2511
2534 2512 } else if (qa_info->qa_location == HERMON_QUEUE_LOCATION_USERLAND) {
2535 2513
2536 2514 ddi_umem_free(qa_info->qa_umemcookie);
2537 2515
2538 2516 }
2539 2517
2540 2518 /* Always free the dma handle */
2541 2519 ddi_dma_free_handle(&qa_info->qa_dmahdl);
2542 2520 }
2543 2521
2544 2522 /*
2545 2523 * hermon_create_fmr_pool()
2546 2524 * Create a pool of FMRs.
2547 2525 * Context: Can be called from kernel context only.
2548 2526 */
2549 2527 int
2550 2528 hermon_create_fmr_pool(hermon_state_t *state, hermon_pdhdl_t pd,
2551 2529 ibt_fmr_pool_attr_t *fmr_attr, hermon_fmrhdl_t *fmrpoolp)
2552 2530 {
2553 2531 hermon_fmrhdl_t fmrpool;
2554 2532 hermon_fmr_list_t *fmr, *fmr_next;
2555 2533 hermon_mrhdl_t mr;
2556 2534 int status;
2557 2535 int sleep;
2558 2536 int i;
2559 2537
2560 2538 sleep = (fmr_attr->fmr_flags & IBT_MR_SLEEP) ? HERMON_SLEEP :
2561 2539 HERMON_NOSLEEP;
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
2562 2540 if ((sleep == HERMON_SLEEP) &&
2563 2541 (sleep != HERMON_SLEEPFLAG_FOR_CONTEXT())) {
2564 2542 return (IBT_INVALID_PARAM);
2565 2543 }
2566 2544
2567 2545 fmrpool = (hermon_fmrhdl_t)kmem_zalloc(sizeof (*fmrpool), sleep);
2568 2546 if (fmrpool == NULL) {
2569 2547 status = IBT_INSUFF_RESOURCE;
2570 2548 goto fail;
2571 2549 }
2572 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmrpool))
2573 2550
2574 2551 mutex_init(&fmrpool->fmr_lock, NULL, MUTEX_DRIVER,
2575 2552 DDI_INTR_PRI(state->hs_intrmsi_pri));
2576 2553 mutex_init(&fmrpool->remap_lock, NULL, MUTEX_DRIVER,
2577 2554 DDI_INTR_PRI(state->hs_intrmsi_pri));
2578 2555 mutex_init(&fmrpool->dirty_lock, NULL, MUTEX_DRIVER,
2579 2556 DDI_INTR_PRI(state->hs_intrmsi_pri));
2580 2557
2581 2558 fmrpool->fmr_state = state;
2582 2559 fmrpool->fmr_flush_function = fmr_attr->fmr_func_hdlr;
2583 2560 fmrpool->fmr_flush_arg = fmr_attr->fmr_func_arg;
2584 2561 fmrpool->fmr_pool_size = 0;
2585 2562 fmrpool->fmr_max_pages = fmr_attr->fmr_max_pages_per_fmr;
2586 2563 fmrpool->fmr_page_sz = fmr_attr->fmr_page_sz;
2587 2564 fmrpool->fmr_dirty_watermark = fmr_attr->fmr_pool_size / 4;
2588 2565 fmrpool->fmr_dirty_len = 0;
2589 2566 fmrpool->fmr_remap_watermark = fmr_attr->fmr_pool_size / 32;
2590 2567 fmrpool->fmr_remap_len = 0;
2591 2568 fmrpool->fmr_flags = fmr_attr->fmr_flags;
2592 2569 fmrpool->fmr_stat_register = 0;
2593 2570 fmrpool->fmr_max_remaps = state->hs_cfg_profile->cp_fmr_max_remaps;
2594 2571 fmrpool->fmr_remap_gen = 1;
2595 2572
2596 2573 fmrpool->fmr_free_list_tail = &fmrpool->fmr_free_list;
2597 2574 fmrpool->fmr_dirty_list = NULL;
2598 2575 fmrpool->fmr_dirty_list_tail = &fmrpool->fmr_dirty_list;
2599 2576 fmrpool->fmr_remap_list = NULL;
2600 2577 fmrpool->fmr_remap_list_tail = &fmrpool->fmr_remap_list;
2601 2578 fmrpool->fmr_pool_size = fmrpool->fmr_free_len =
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
2602 2579 fmr_attr->fmr_pool_size;
2603 2580
2604 2581 for (i = 0; i < fmr_attr->fmr_pool_size; i++) {
2605 2582 status = hermon_mr_alloc_fmr(state, pd, fmrpool, &mr);
2606 2583 if (status != DDI_SUCCESS) {
2607 2584 goto fail2;
2608 2585 }
2609 2586
2610 2587 fmr = (hermon_fmr_list_t *)kmem_zalloc(
2611 2588 sizeof (hermon_fmr_list_t), sleep);
2612 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmr))
2613 2589
2614 2590 fmr->fmr = mr;
2615 2591 fmr->fmr_remaps = 0;
2616 2592 fmr->fmr_remap_gen = fmrpool->fmr_remap_gen;
2617 2593 fmr->fmr_pool = fmrpool;
2618 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
2619 2594 mr->mr_fmr = fmr;
2620 2595
2621 2596 if (!i) /* address of last entry's link */
2622 2597 fmrpool->fmr_free_list_tail = &fmr->fmr_next;
2623 2598 fmr->fmr_next = fmrpool->fmr_free_list;
2624 2599 fmrpool->fmr_free_list = fmr;
2625 2600 }
2626 2601
2627 2602 /* Set to return pool */
2628 2603 *fmrpoolp = fmrpool;
2629 2604
2630 2605 IBTF_DPRINTF_L2("fmr", "create_fmr_pool SUCCESS");
2631 2606 return (IBT_SUCCESS);
2632 2607 fail2:
2633 2608 for (fmr = fmrpool->fmr_free_list; fmr != NULL; fmr = fmr_next) {
2634 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmr))
2635 2609 fmr_next = fmr->fmr_next;
2636 2610 (void) hermon_mr_dealloc_fmr(state, &fmr->fmr);
2637 2611 kmem_free(fmr, sizeof (hermon_fmr_list_t));
2638 2612 }
2639 2613 kmem_free(fmrpool, sizeof (*fmrpool));
2640 2614 fail:
2641 2615 *fmrpoolp = NULL;
2642 2616 IBTF_DPRINTF_L2("fmr", "create_fmr_pool FAILED");
2643 2617 if (status == DDI_FAILURE) {
2644 2618 return (ibc_get_ci_failure(0));
2645 2619 } else {
2646 2620 return (status);
2647 2621 }
2648 2622 }
2649 2623
2650 2624 /*
2651 2625 * hermon_destroy_fmr_pool()
2652 2626 * Destroy an FMR pool and free all associated resources.
2653 2627 * Context: Can be called from kernel context only.
2654 2628 */
2655 2629 int
2656 2630 hermon_destroy_fmr_pool(hermon_state_t *state, hermon_fmrhdl_t fmrpool)
2657 2631 {
2658 2632 hermon_fmr_list_t *fmr, *fmr_next;
2659 2633
2660 2634 mutex_enter(&fmrpool->fmr_lock);
2661 2635 hermon_fmr_cleanup(fmrpool);
2662 2636
2663 2637 for (fmr = fmrpool->fmr_free_list; fmr != NULL; fmr = fmr_next) {
2664 2638 fmr_next = fmr->fmr_next;
2665 2639
2666 2640 (void) hermon_mr_dealloc_fmr(state, &fmr->fmr);
2667 2641 kmem_free(fmr, sizeof (hermon_fmr_list_t));
2668 2642
2669 2643 --fmrpool->fmr_pool_size;
2670 2644 }
2671 2645 ASSERT(fmrpool->fmr_pool_size == 0);
2672 2646 mutex_exit(&fmrpool->fmr_lock);
2673 2647
2674 2648 mutex_destroy(&fmrpool->fmr_lock);
2675 2649 mutex_destroy(&fmrpool->dirty_lock);
2676 2650 mutex_destroy(&fmrpool->remap_lock);
2677 2651
2678 2652 kmem_free(fmrpool, sizeof (*fmrpool));
2679 2653 IBTF_DPRINTF_L2("fmr", "destroy_fmr_pool SUCCESS");
2680 2654 return (DDI_SUCCESS);
2681 2655 }
2682 2656
2683 2657 /*
2684 2658 * hermon_flush_fmr_pool()
2685 2659 * Ensure that all unmapped FMRs are fully invalidated.
2686 2660 * Context: Can be called from kernel context only.
2687 2661 */
2688 2662 /* ARGSUSED */
2689 2663 int
2690 2664 hermon_flush_fmr_pool(hermon_state_t *state, hermon_fmrhdl_t fmrpool)
2691 2665 {
2692 2666 /*
2693 2667 * Force the unmapping of all entries on the dirty list, regardless of
2694 2668 * whether the watermark has been hit yet.
2695 2669 */
2696 2670 /* grab the pool lock */
2697 2671 mutex_enter(&fmrpool->fmr_lock);
2698 2672 hermon_fmr_cleanup(fmrpool);
2699 2673 mutex_exit(&fmrpool->fmr_lock);
2700 2674 return (DDI_SUCCESS);
2701 2675 }
2702 2676
2703 2677 /*
2704 2678 * hermon_register_physical_fmr()
2705 2679 * Map memory into FMR
2706 2680 * Context: Can be called from interrupt or base context.
2707 2681 */
2708 2682 int
2709 2683 hermon_register_physical_fmr(hermon_state_t *state, hermon_fmrhdl_t fmrpool,
2710 2684 ibt_pmr_attr_t *mem_pattr, hermon_mrhdl_t *mr,
2711 2685 ibt_pmr_desc_t *mem_desc_p)
2712 2686 {
2713 2687 hermon_fmr_list_t *fmr;
2714 2688 int status;
2715 2689
2716 2690 /* Check length */
2717 2691 if (mem_pattr->pmr_len < 1 || (mem_pattr->pmr_num_buf >
2718 2692 fmrpool->fmr_max_pages)) {
2719 2693 return (IBT_MR_LEN_INVALID);
2720 2694 }
2721 2695
2722 2696 mutex_enter(&fmrpool->fmr_lock);
2723 2697 if (fmrpool->fmr_free_list == NULL) {
2724 2698 if (hermon_fmr_verbose & 2)
2725 2699 IBTF_DPRINTF_L2("fmr", "register needs remap");
2726 2700 mutex_enter(&fmrpool->remap_lock);
2727 2701 if (fmrpool->fmr_remap_list) {
2728 2702 /* add to free list */
2729 2703 *(fmrpool->fmr_free_list_tail) =
2730 2704 fmrpool->fmr_remap_list;
2731 2705 fmrpool->fmr_remap_list = NULL;
2732 2706 fmrpool->fmr_free_list_tail =
2733 2707 fmrpool->fmr_remap_list_tail;
2734 2708
2735 2709 /* reset list */
2736 2710 fmrpool->fmr_remap_list_tail = &fmrpool->fmr_remap_list;
2737 2711 fmrpool->fmr_free_len += fmrpool->fmr_remap_len;
2738 2712 fmrpool->fmr_remap_len = 0;
2739 2713 }
2740 2714 mutex_exit(&fmrpool->remap_lock);
2741 2715 }
2742 2716 if (fmrpool->fmr_free_list == NULL) {
2743 2717 if (hermon_fmr_verbose & 2)
2744 2718 IBTF_DPRINTF_L2("fmr", "register needs cleanup");
2745 2719 hermon_fmr_cleanup(fmrpool);
2746 2720 }
2747 2721
2748 2722 /* grab next free entry */
2749 2723 fmr = fmrpool->fmr_free_list;
2750 2724 if (fmr == NULL) {
2751 2725 IBTF_DPRINTF_L2("fmr", "WARNING: no free fmr resource");
2752 2726 cmn_err(CE_CONT, "no free fmr resource\n");
↓ open down ↓ |
108 lines elided |
↑ open up ↑ |
2753 2727 mutex_exit(&fmrpool->fmr_lock);
2754 2728 return (IBT_INSUFF_RESOURCE);
2755 2729 }
2756 2730
2757 2731 if ((fmrpool->fmr_free_list = fmr->fmr_next) == NULL)
2758 2732 fmrpool->fmr_free_list_tail = &fmrpool->fmr_free_list;
2759 2733 fmr->fmr_next = NULL;
2760 2734 fmrpool->fmr_stat_register++;
2761 2735 mutex_exit(&fmrpool->fmr_lock);
2762 2736
2763 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmr))
2764 2737 status = hermon_mr_register_physical_fmr(state, mem_pattr, fmr->fmr,
2765 2738 mem_desc_p);
2766 2739 if (status != DDI_SUCCESS) {
2767 2740 return (status);
2768 2741 }
2769 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmr->fmr))
2770 2742 if (hermon_rdma_debug & 0x4)
2771 2743 IBTF_DPRINTF_L2("fmr", " reg: mr %p key %x",
2772 2744 fmr->fmr, fmr->fmr->mr_rkey);
2773 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*fmr->fmr))
2774 2745 if (fmr->fmr_remap_gen != fmrpool->fmr_remap_gen) {
2775 2746 fmr->fmr_remap_gen = fmrpool->fmr_remap_gen;
2776 2747 fmr->fmr_remaps = 0;
2777 2748 }
2778 2749
2779 2750 fmr->fmr_remaps++;
2780 2751
2781 2752 *mr = (hermon_mrhdl_t)fmr->fmr;
2782 2753
2783 2754 return (DDI_SUCCESS);
2784 2755 }
2785 2756
2786 2757 /*
2787 2758 * hermon_deregister_fmr()
2788 2759 * Unmap FMR
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
2789 2760 * Context: Can be called from kernel context only.
2790 2761 */
2791 2762 int
2792 2763 hermon_deregister_fmr(hermon_state_t *state, hermon_mrhdl_t mr)
2793 2764 {
2794 2765 hermon_fmrhdl_t fmrpool;
2795 2766 hermon_fmr_list_t *fmr, **fmrlast;
2796 2767 int len;
2797 2768
2798 2769 fmr = mr->mr_fmr;
2799 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*fmr))
2800 2770 fmrpool = fmr->fmr_pool;
2801 2771
2802 2772 /* mark as owned by software */
2803 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(fmr->fmr)))
2804 2773 *(uint8_t *)(fmr->fmr->mr_mptrsrcp->hr_addr) = 0xF0;
2805 2774
2806 2775 if (fmr->fmr_remaps <
2807 2776 state->hs_cfg_profile->cp_fmr_max_remaps) {
2808 2777 /* add to remap list */
2809 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(fmr->fmr)))
2810 2778 if (hermon_rdma_debug & 0x4)
2811 2779 IBTF_DPRINTF_L2("fmr", "dereg: mr %p key %x",
2812 2780 fmr->fmr, fmr->fmr->mr_rkey);
2813 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*(fmr->fmr)))
2814 2781 mutex_enter(&fmrpool->remap_lock);
2815 2782 fmr->fmr_next = NULL;
2816 2783 *(fmrpool->fmr_remap_list_tail) = fmr;
2817 2784 fmrpool->fmr_remap_list_tail = &fmr->fmr_next;
2818 2785 fmrpool->fmr_remap_len++;
2819 2786
2820 2787 /* conditionally add remap list back to free list */
2821 2788 fmrlast = NULL;
2822 2789 if (fmrpool->fmr_remap_len >=
2823 2790 fmrpool->fmr_remap_watermark) {
2824 2791 fmr = fmrpool->fmr_remap_list;
2825 2792 fmrlast = fmrpool->fmr_remap_list_tail;
2826 2793 len = fmrpool->fmr_remap_len;
2827 2794 fmrpool->fmr_remap_len = 0;
2828 2795 fmrpool->fmr_remap_list = NULL;
2829 2796 fmrpool->fmr_remap_list_tail =
2830 2797 &fmrpool->fmr_remap_list;
2831 2798 }
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
2832 2799 mutex_exit(&fmrpool->remap_lock);
2833 2800 if (fmrlast) {
2834 2801 mutex_enter(&fmrpool->fmr_lock);
2835 2802 *(fmrpool->fmr_free_list_tail) = fmr;
2836 2803 fmrpool->fmr_free_list_tail = fmrlast;
2837 2804 fmrpool->fmr_free_len += len;
2838 2805 mutex_exit(&fmrpool->fmr_lock);
2839 2806 }
2840 2807 } else {
2841 2808 /* add to dirty list */
2842 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(fmr->fmr)))
2843 2809 if (hermon_rdma_debug & 0x4)
2844 2810 IBTF_DPRINTF_L2("fmr", "dirty: mr %p key %x",
2845 2811 fmr->fmr, fmr->fmr->mr_rkey);
2846 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*(fmr->fmr)))
2847 2812
2848 2813 mutex_enter(&fmrpool->dirty_lock);
2849 2814 fmr->fmr_next = NULL;
2850 2815 *(fmrpool->fmr_dirty_list_tail) = fmr;
2851 2816 fmrpool->fmr_dirty_list_tail = &fmr->fmr_next;
2852 2817 fmrpool->fmr_dirty_len++;
2853 2818
2854 2819 if (fmrpool->fmr_dirty_len >=
2855 2820 fmrpool->fmr_dirty_watermark) {
2856 2821 mutex_exit(&fmrpool->dirty_lock);
2857 2822 mutex_enter(&fmrpool->fmr_lock);
2858 2823 hermon_fmr_cleanup(fmrpool);
2859 2824 mutex_exit(&fmrpool->fmr_lock);
2860 2825 } else
2861 2826 mutex_exit(&fmrpool->dirty_lock);
2862 2827 }
2863 2828 return (DDI_SUCCESS);
2864 2829 }
2865 2830
2866 2831 /*
2867 2832 * hermon_fmr_cleanup()
2868 2833 * Context: Called from any context.
2869 2834 */
2870 2835 static void
2871 2836 hermon_fmr_cleanup(hermon_fmrhdl_t fmrpool)
2872 2837 {
2873 2838 int status;
2874 2839
2875 2840 ASSERT(MUTEX_HELD(&fmrpool->fmr_lock));
2876 2841
2877 2842 if (fmrpool->fmr_stat_register == 0)
2878 2843 return;
2879 2844
2880 2845 fmrpool->fmr_stat_register = 0;
2881 2846 membar_producer();
2882 2847
2883 2848 if (hermon_fmr_verbose)
2884 2849 IBTF_DPRINTF_L2("fmr", "TPT_SYNC");
2885 2850 status = hermon_sync_tpt_cmd_post(fmrpool->fmr_state,
2886 2851 HERMON_CMD_NOSLEEP_SPIN);
2887 2852 if (status != HERMON_CMD_SUCCESS) {
2888 2853 cmn_err(CE_WARN, "fmr SYNC_TPT failed(%x)\n", status);
2889 2854 }
2890 2855 fmrpool->fmr_remap_gen++;
2891 2856
2892 2857 /* add everything back to the free list */
2893 2858 mutex_enter(&fmrpool->dirty_lock);
2894 2859 if (fmrpool->fmr_dirty_list) {
2895 2860 /* add to free list */
2896 2861 *(fmrpool->fmr_free_list_tail) = fmrpool->fmr_dirty_list;
2897 2862 fmrpool->fmr_dirty_list = NULL;
2898 2863 fmrpool->fmr_free_list_tail = fmrpool->fmr_dirty_list_tail;
2899 2864
2900 2865 /* reset list */
2901 2866 fmrpool->fmr_dirty_list_tail = &fmrpool->fmr_dirty_list;
2902 2867 fmrpool->fmr_free_len += fmrpool->fmr_dirty_len;
2903 2868 fmrpool->fmr_dirty_len = 0;
2904 2869 }
2905 2870 mutex_exit(&fmrpool->dirty_lock);
2906 2871
2907 2872 mutex_enter(&fmrpool->remap_lock);
2908 2873 if (fmrpool->fmr_remap_list) {
2909 2874 /* add to free list */
2910 2875 *(fmrpool->fmr_free_list_tail) = fmrpool->fmr_remap_list;
2911 2876 fmrpool->fmr_remap_list = NULL;
2912 2877 fmrpool->fmr_free_list_tail = fmrpool->fmr_remap_list_tail;
2913 2878
2914 2879 /* reset list */
2915 2880 fmrpool->fmr_remap_list_tail = &fmrpool->fmr_remap_list;
2916 2881 fmrpool->fmr_free_len += fmrpool->fmr_remap_len;
2917 2882 fmrpool->fmr_remap_len = 0;
2918 2883 }
2919 2884 mutex_exit(&fmrpool->remap_lock);
2920 2885
2921 2886 if (fmrpool->fmr_flush_function != NULL) {
2922 2887 (void) fmrpool->fmr_flush_function(
2923 2888 (ibc_fmr_pool_hdl_t)fmrpool,
2924 2889 fmrpool->fmr_flush_arg);
2925 2890 }
2926 2891 }
↓ open down ↓ |
70 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX