Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_umap.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_umap.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_umap.c
28 28 * Hermon Userland Mapping Routines
29 29 *
30 30 * Implements all the routines necessary for enabling direct userland
31 31 * access to the Hermon hardware. This includes all routines necessary for
32 32 * maintaining the "userland resources database" and all the support routines
33 33 * for the devmap calls.
34 34 */
35 35
36 36 #include <sys/types.h>
37 37 #include <sys/conf.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/modctl.h>
41 41 #include <sys/file.h>
42 42 #include <sys/avl.h>
43 43 #include <sys/sysmacros.h>
44 44
45 45 #include <sys/ib/adapters/hermon/hermon.h>
46 46
47 47 /* Hermon HCA state pointer (extern) */
48 48 extern void *hermon_statep;
49 49
50 50 /* Hermon HCA Userland Resource Database (extern) */
51 51 extern hermon_umap_db_t hermon_userland_rsrc_db;
52 52
53 53 static int hermon_umap_uarpg(hermon_state_t *state, devmap_cookie_t dhp,
54 54 hermon_rsrc_t *rsrcp, uint64_t offset, size_t *maplen, int *err);
55 55 static int hermon_umap_cqmem(hermon_state_t *state, devmap_cookie_t dhp,
56 56 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
57 57 static int hermon_umap_qpmem(hermon_state_t *state, devmap_cookie_t dhp,
58 58 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
59 59 static int hermon_umap_srqmem(hermon_state_t *state, devmap_cookie_t dhp,
60 60 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
61 61 static int hermon_umap_dbrecmem(hermon_state_t *state, devmap_cookie_t dhp,
62 62 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
63 63 static int hermon_devmap_umem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
64 64 offset_t off, size_t len, void **pvtp);
65 65 static int hermon_devmap_umem_dup(devmap_cookie_t dhp, void *pvtp,
66 66 devmap_cookie_t new_dhp, void **new_pvtp);
67 67 static void hermon_devmap_umem_unmap(devmap_cookie_t dhp, void *pvtp,
68 68 offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
69 69 devmap_cookie_t new_dhp2, void **pvtp2);
70 70 static int hermon_devmap_dbrecmem_map(devmap_cookie_t dhp, dev_t dev,
71 71 uint_t flags, offset_t off, size_t len, void **pvtp);
72 72 static int hermon_devmap_dbrecmem_dup(devmap_cookie_t dhp, void *pvtp,
73 73 devmap_cookie_t new_dhp, void **new_pvtp);
74 74 static void hermon_devmap_dbrecmem_unmap(devmap_cookie_t dhp, void *pvtp,
75 75 offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
76 76 devmap_cookie_t new_dhp2, void **pvtp2);
77 77 static int hermon_devmap_devmem_map(devmap_cookie_t dhp, dev_t dev,
78 78 uint_t flags, offset_t off, size_t len, void **pvtp);
79 79 static int hermon_devmap_devmem_dup(devmap_cookie_t dhp, void *pvtp,
80 80 devmap_cookie_t new_dhp, void **new_pvtp);
81 81 static void hermon_devmap_devmem_unmap(devmap_cookie_t dhp, void *pvtp,
82 82 offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
83 83 devmap_cookie_t new_dhp2, void **pvtp2);
84 84 static ibt_status_t hermon_umap_mr_data_in(hermon_mrhdl_t mr,
85 85 ibt_mr_data_in_t *data, size_t data_sz);
86 86 static ibt_status_t hermon_umap_cq_data_out(hermon_cqhdl_t cq,
87 87 mlnx_umap_cq_data_out_t *data, size_t data_sz);
88 88 static ibt_status_t hermon_umap_qp_data_out(hermon_qphdl_t qp,
89 89 mlnx_umap_qp_data_out_t *data, size_t data_sz);
90 90 static ibt_status_t hermon_umap_srq_data_out(hermon_srqhdl_t srq,
91 91 mlnx_umap_srq_data_out_t *data, size_t data_sz);
92 92 static ibt_status_t hermon_umap_pd_data_out(hermon_pdhdl_t pd,
93 93 mlnx_umap_pd_data_out_t *data, size_t data_sz);
94 94 static int hermon_umap_db_compare(const void *query, const void *entry);
95 95
96 96
/*
 * These callbacks are passed to devmap_umem_setup() and devmap_devmem_setup(),
 * respectively. They are used to handle (among other things) partial
 * unmappings and to provide a method for invalidating mappings inherited
 * as a result of a fork(2) system call.
 *
 * Three tables: one for umem-backed queue memory (CQ/QP/SRQ), one for
 * device memory (UAR pages), and one for doorbell record memory.
 */
static struct devmap_callback_ctl hermon_devmap_umem_cbops = {
	DEVMAP_OPS_REV,
	hermon_devmap_umem_map,
	NULL,			/* no special "access" handler */
	hermon_devmap_umem_dup,
	hermon_devmap_umem_unmap
};
static struct devmap_callback_ctl hermon_devmap_devmem_cbops = {
	DEVMAP_OPS_REV,
	hermon_devmap_devmem_map,
	NULL,			/* no special "access" handler */
	hermon_devmap_devmem_dup,
	hermon_devmap_devmem_unmap
};
static struct devmap_callback_ctl hermon_devmap_dbrecmem_cbops = {
	DEVMAP_OPS_REV,
	hermon_devmap_dbrecmem_map,
	NULL,			/* no special "access" handler */
	hermon_devmap_dbrecmem_dup,
	hermon_devmap_dbrecmem_unmap
};
124 124
/*
 * hermon_devmap()
 *    Context: Can be called from user context.
 *
 * Driver devmap(9E) entry point.  Decodes the mmap(2) offset into a
 * resource "key" and "type", looks the resource up in the userland
 * resources database, and dispatches to the appropriate mapping routine
 * (UAR page, CQ, QP, SRQ, or doorbell record memory).  Returns 0 on
 * success or an errno value on failure.
 */
/* ARGSUSED */
int
hermon_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	hermon_state_t	*state;
	hermon_rsrc_t	*rsrcp;
	minor_t		instance;
	uint64_t	key, value;
	uint64_t	bf_offset = 0;
	uint_t		type;
	int		err, status;

	/* Get Hermon softstate structure from instance */
	instance = HERMON_DEV_INSTANCE(dev);
	state = ddi_get_soft_state(hermon_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	/*
	 * Access to Hermon devmap interface is not allowed in
	 * "maintenance mode".
	 */
	if (state->hs_operational_mode == HERMON_MAINTENANCE_MODE) {
		return (EFAULT);
	}

	/*
	 * The bottom bits of "offset" are undefined (number depends on
	 * system PAGESIZE). Shifting these off leaves us with a "key".
	 * The "key" is actually a combination of both a real key value
	 * (for the purpose of database lookup) and a "type" value. We
	 * extract this information before doing the database lookup.
	 */
	key = off >> PAGESHIFT;
	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
	key = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
	/*
	 * A Blueflame page request is treated as a UAR page request at
	 * the device's Blueflame offset; reject it if the device did not
	 * report Blueflame support.
	 */
	if (type == MLNX_UMAP_BLUEFLAMEPG_RSRC) {
		if (state->hs_devlim.blu_flm == 0) {
			return (EFAULT);
		}
		bf_offset = state->hs_bf_offset;
		type = MLNX_UMAP_UARPG_RSRC;
	}
	status = hermon_umap_db_find(instance, key, type, &value, 0, NULL);
	if (status == DDI_SUCCESS) {
		rsrcp = (hermon_rsrc_t *)(uintptr_t)value;

		switch (type) {
		case MLNX_UMAP_UARPG_RSRC:
			/*
			 * Double check that process who open()'d Hermon is
			 * same process attempting to mmap() UAR page.
			 */
			if (key != ddi_get_pid()) {
				return (EINVAL);
			}

			/* Map the UAR page out for userland access */
			status = hermon_umap_uarpg(state, dhp, rsrcp, bf_offset,
			    maplen, &err);
			if (status != DDI_SUCCESS) {
				return (err);
			}
			break;

		case MLNX_UMAP_CQMEM_RSRC:
			/* Map the CQ memory out for userland access */
			status = hermon_umap_cqmem(state, dhp, rsrcp, off,
			    maplen, &err);
			if (status != DDI_SUCCESS) {
				return (err);
			}
			break;

		case MLNX_UMAP_QPMEM_RSRC:
			/* Map the QP memory out for userland access */
			status = hermon_umap_qpmem(state, dhp, rsrcp, off,
			    maplen, &err);
			if (status != DDI_SUCCESS) {
				return (err);
			}
			break;

		case MLNX_UMAP_SRQMEM_RSRC:
			/* Map the SRQ memory out for userland access */
			status = hermon_umap_srqmem(state, dhp, rsrcp, off,
			    maplen, &err);
			if (status != DDI_SUCCESS) {
				return (err);
			}
			break;

		case MLNX_UMAP_DBRMEM_RSRC:
			/*
			 * Map the doorbell record memory out for
			 * userland access
			 */
			status = hermon_umap_dbrecmem(state, dhp, rsrcp, off,
			    maplen, &err);
			if (status != DDI_SUCCESS) {
				return (err);
			}
			break;

		default:
			HERMON_WARNING(state, "unexpected rsrc type in devmap");
			return (EINVAL);
		}
	} else {
		/* No matching entry in the userland resources database */
		return (EINVAL);
	}

	return (0);
}
245 245
246 246
247 247 /*
248 248 * hermon_umap_uarpg()
249 249 * Context: Can be called from user context.
250 250 */
251 251 static int
252 252 hermon_umap_uarpg(hermon_state_t *state, devmap_cookie_t dhp,
253 253 hermon_rsrc_t *rsrcp, uint64_t offset, size_t *maplen, int *err)
254 254 {
255 255 int status;
256 256 uint_t maxprot;
257 257 ddi_device_acc_attr_t *accattrp = &state->hs_reg_accattr;
258 258 ddi_device_acc_attr_t accattr;
259 259
260 260 if (offset != 0) { /* Hermon Blueflame */
261 261 /* Try to use write coalescing data ordering */
262 262 accattr = *accattrp;
263 263 accattr.devacc_attr_dataorder = DDI_STORECACHING_OK_ACC;
264 264 accattrp = &accattr;
265 265 }
266 266
267 267 /* Map out the UAR page (doorbell page) */
268 268 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
269 269 status = devmap_devmem_setup(dhp, state->hs_dip,
270 270 &hermon_devmap_devmem_cbops, HERMON_UAR_BAR, (rsrcp->hr_indx <<
271 271 PAGESHIFT) + offset, PAGESIZE, maxprot, DEVMAP_ALLOW_REMAP,
272 272 accattrp);
273 273 if (status < 0) {
274 274 *err = status;
275 275 return (DDI_FAILURE);
276 276 }
277 277
278 278 *maplen = PAGESIZE;
279 279 return (DDI_SUCCESS);
280 280 }
281 281
282 282
283 283 /*
284 284 * hermon_umap_cqmem()
285 285 * Context: Can be called from user context.
286 286 */
287 287 /* ARGSUSED */
288 288 static int
289 289 hermon_umap_cqmem(hermon_state_t *state, devmap_cookie_t dhp,
290 290 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
291 291 {
292 292 hermon_cqhdl_t cq;
293 293 size_t size;
294 294 uint_t maxprot;
295 295 int status;
296 296
297 297 /* Extract the Hermon CQ handle pointer from the hermon_rsrc_t */
298 298 cq = (hermon_cqhdl_t)rsrcp->hr_addr;
299 299
300 300 /* Round-up the CQ size to system page size */
301 301 size = ptob(btopr(cq->cq_resize_hdl ?
302 302 cq->cq_resize_hdl->cq_cqinfo.qa_size : cq->cq_cqinfo.qa_size));
303 303
304 304 /* Map out the CQ memory - use resize_hdl if non-NULL */
305 305 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
306 306 status = devmap_umem_setup(dhp, state->hs_dip,
307 307 &hermon_devmap_umem_cbops, cq->cq_resize_hdl ?
308 308 cq->cq_resize_hdl->cq_cqinfo.qa_umemcookie :
309 309 cq->cq_cqinfo.qa_umemcookie, 0, size,
310 310 maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
311 311 if (status < 0) {
312 312 *err = status;
313 313 return (DDI_FAILURE);
314 314 }
315 315 *maplen = size;
316 316
317 317 return (DDI_SUCCESS);
318 318 }
319 319
320 320
321 321 /*
322 322 * hermon_umap_qpmem()
323 323 * Context: Can be called from user context.
324 324 */
325 325 /* ARGSUSED */
326 326 static int
327 327 hermon_umap_qpmem(hermon_state_t *state, devmap_cookie_t dhp,
328 328 hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
329 329 {
330 330 hermon_qphdl_t qp;
331 331 offset_t offset;
332 332 size_t size;
333 333 uint_t maxprot;
334 334 int status;
335 335
336 336 /* Extract the Hermon QP handle pointer from the hermon_rsrc_t */
337 337 qp = (hermon_qphdl_t)rsrcp->hr_addr;
338 338
339 339 /*
340 340 * Calculate the offset of the first work queue (send or recv) into
341 341 * the memory (ddi_umem_alloc()) allocated previously for the QP.
342 342 */
343 343 offset = (offset_t)((uintptr_t)qp->qp_wqinfo.qa_buf_aligned -
344 344 (uintptr_t)qp->qp_wqinfo.qa_buf_real);
345 345
346 346 /* Round-up the QP work queue sizes to system page size */
347 347 size = ptob(btopr(qp->qp_wqinfo.qa_size));
348 348
349 349 /* Map out the QP memory */
350 350 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
351 351 status = devmap_umem_setup(dhp, state->hs_dip,
352 352 &hermon_devmap_umem_cbops, qp->qp_wqinfo.qa_umemcookie, offset,
353 353 size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
354 354 if (status < 0) {
355 355 *err = status;
356 356 return (DDI_FAILURE);
357 357 }
358 358 *maplen = size;
359 359
360 360 return (DDI_SUCCESS);
361 361 }
362 362
363 363
/*
 * hermon_umap_srqmem()
 *    Context: Can be called from user context.
 *
 * Maps an SRQ's work queue memory (ddi_umem_alloc()'d) out to userland,
 * starting at the first shared receive queue within the allocation.
 * On success returns DDI_SUCCESS and sets *maplen; on failure returns
 * DDI_FAILURE and stores the devmap_umem_setup() error in *err.
 */
/* ARGSUSED */
static int
hermon_umap_srqmem(hermon_state_t *state, devmap_cookie_t dhp,
    hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
{
	hermon_srqhdl_t	srq;
	offset_t	offset;
	size_t		size;
	uint_t		maxprot;
	int		status;

	/* Extract the Hermon SRQ handle pointer from the hermon_rsrc_t */
	srq = (hermon_srqhdl_t)rsrcp->hr_addr;

	/*
	 * Calculate the offset of the first shared recv queue into the memory
	 * (ddi_umem_alloc()) allocated previously for the SRQ.  This is the
	 * difference between the aligned and real buffer addresses.
	 */
	offset = (offset_t)((uintptr_t)srq->srq_wqinfo.qa_buf_aligned -
	    (uintptr_t)srq->srq_wqinfo.qa_buf_real);

	/* Round-up the SRQ work queue sizes to system page size */
	size = ptob(btopr(srq->srq_wqinfo.qa_size));

	/* Map out the SRQ memory */
	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
	status = devmap_umem_setup(dhp, state->hs_dip,
	    &hermon_devmap_umem_cbops, srq->srq_wqinfo.qa_umemcookie, offset,
	    size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
	if (status < 0) {
		*err = status;
		return (DDI_FAILURE);
	}
	*maplen = size;

	return (DDI_SUCCESS);
}
405 405
406 406
/*
 * hermon_umap_dbrecmem()
 *    Context: Can be called from user context.
 *
 * Maps one page of userland doorbell record memory out to the caller.
 * NOTE: unlike the other mapping routines, the "rsrcp" argument here is
 * actually a hermon_udbr_page_t pointer (that is what was stored in the
 * userland resources database for this resource type).
 */
/* ARGSUSED */
static int
hermon_umap_dbrecmem(hermon_state_t *state, devmap_cookie_t dhp,
    hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
{
	hermon_udbr_page_t *pagep;
	offset_t	offset;
	size_t		size;
	uint_t		maxprot;
	int		status;

	/* We stored the udbr_page pointer, and not a hermon_rsrc_t */
	pagep = (hermon_udbr_page_t *)rsrcp;

	/* Doorbell records start at the beginning of the allocation */
	offset = 0;

	/* Map exactly one system page of doorbell records */
	size = PAGESIZE;

	/* Map out the Doorbell Record memory */
	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
	status = devmap_umem_setup(dhp, state->hs_dip,
	    &hermon_devmap_dbrecmem_cbops, pagep->upg_umemcookie, offset,
	    size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
	if (status < 0) {
		*err = status;
		return (DDI_FAILURE);
	}
	*maplen = size;

	return (DDI_SUCCESS);
}
447 447
448 448
/*
 * hermon_devmap_umem_map()
 *    Context: Can be called from kernel context.
 *
 * devmap "map" callback for CQ/QP/SRQ queue memory.  Allocates a
 * tracking structure for the mapping (used later to account for partial
 * unmappings) and records the devmap handle in the corresponding
 * CQ/QP/SRQ handle, so that the free path knows whether it must
 * invalidate an outstanding userland mapping.  Fails if the resource
 * already has a valid userland mapping.
 */
/* ARGSUSED */
static int
hermon_devmap_umem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
    offset_t off, size_t len, void **pvtp)
{
	hermon_state_t		*state;
	hermon_devmap_track_t	*dvm_track;
	hermon_cqhdl_t		cq;
	hermon_qphdl_t		qp;
	hermon_srqhdl_t		srq;
	minor_t			instance;
	uint64_t		key;
	uint_t			type;

	/* Get Hermon softstate structure from instance */
	instance = HERMON_DEV_INSTANCE(dev);
	state = ddi_get_soft_state(hermon_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	/*
	 * The bottom bits of "offset" are undefined (number depends on
	 * system PAGESIZE). Shifting these off leaves us with a "key".
	 * The "key" is actually a combination of both a real key value
	 * (for the purpose of database lookup) and a "type" value. Although
	 * we are not going to do any database lookup per se, we do want
	 * to extract the "key" and the "type" (to enable faster lookup of
	 * the appropriate CQ or QP handle).
	 */
	key = off >> PAGESHIFT;
	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
	key = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;

	/*
	 * Allocate an entry to track the mapping and unmapping (specifically,
	 * partial unmapping) of this resource.
	 */
	dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
	    sizeof (hermon_devmap_track_t), KM_SLEEP);
	dvm_track->hdt_offset = off;
	dvm_track->hdt_state = state;
	dvm_track->hdt_refcnt = 1;
	mutex_init(&dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(state->hs_intrmsi_pri));

	/*
	 * Depending of the type of resource that has been mapped out, we
	 * need to update the QP or CQ handle to reflect that it has, in
	 * fact, been mapped. This allows the driver code which frees a QP
	 * or a CQ to know whether it is appropriate to do a
	 * devmap_devmem_remap() to invalidate the userland mapping for the
	 * corresponding queue's memory.
	 */
	if (type == MLNX_UMAP_CQMEM_RSRC) {

		/* Use "key" (CQ number) to do fast lookup of CQ handle */
		cq = hermon_cqhdl_from_cqnum(state, key);

		/*
		 * Update the handle to the userland mapping. Note: If
		 * the CQ already has a valid userland mapping, then stop
		 * and return failure.  A pending resize buffer may also
		 * be mapped here (second branch).
		 */
		mutex_enter(&cq->cq_lock);
		if (cq->cq_umap_dhp == NULL) {
			cq->cq_umap_dhp = dhp;
			dvm_track->hdt_size = cq->cq_cqinfo.qa_size;
			mutex_exit(&cq->cq_lock);
		} else if (cq->cq_resize_hdl &&
		    (cq->cq_resize_hdl->cq_umap_dhp == NULL)) {
			cq->cq_resize_hdl->cq_umap_dhp = dhp;
			dvm_track->hdt_size =
			    cq->cq_resize_hdl->cq_cqinfo.qa_size;
			mutex_exit(&cq->cq_lock);
		} else {
			mutex_exit(&cq->cq_lock);
			goto umem_map_fail;
		}

	} else if (type == MLNX_UMAP_QPMEM_RSRC) {

		/* Use "key" (QP number) to do fast lookup of QP handle */
		qp = hermon_qphdl_from_qpnum(state, key);

		/*
		 * Update the handle to the userland mapping. Note: If
		 * the QP already has a valid userland mapping, then stop
		 * and return failure.
		 */
		mutex_enter(&qp->qp_lock);
		if (qp->qp_umap_dhp == NULL) {
			qp->qp_umap_dhp = dhp;
			dvm_track->hdt_size = qp->qp_wqinfo.qa_size;
			mutex_exit(&qp->qp_lock);
		} else {
			mutex_exit(&qp->qp_lock);
			goto umem_map_fail;
		}

	} else if (type == MLNX_UMAP_SRQMEM_RSRC) {

		/* Use "key" (SRQ number) to do fast lookup on SRQ handle */
		srq = hermon_srqhdl_from_srqnum(state, key);

		/*
		 * Update the handle to the userland mapping. Note: If the
		 * SRQ already has a valid userland mapping, then stop and
		 * return failure.
		 */
		mutex_enter(&srq->srq_lock);
		if (srq->srq_umap_dhp == NULL) {
			srq->srq_umap_dhp = dhp;
			dvm_track->hdt_size = srq->srq_wqinfo.qa_size;
			mutex_exit(&srq->srq_lock);
		} else {
			mutex_exit(&srq->srq_lock);
			goto umem_map_fail;
		}
	}

	/*
	 * Pass the private "Hermon devmap tracking structure" back. This
	 * pointer will be returned in subsequent "unmap" callbacks.
	 */
	*pvtp = dvm_track;

	return (DDI_SUCCESS);

umem_map_fail:
	/* Resource was already mapped; undo the tracking allocation */
	mutex_destroy(&dvm_track->hdt_lock);
	kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
	return (DDI_FAILURE);
}
588 587
589 588
/*
 * hermon_devmap_umem_dup()
 *    Context: Can be called from kernel context.
 *
 * devmap "dup" callback, invoked when a mapping is inherited (typically
 * across fork(2)).  The child's copy of the mapping is immediately
 * invalidated, and a fresh tracking structure with hdt_size == 0 is
 * handed back so the subsequent unmap of the child's (invalid) mapping
 * does not disturb the parent's CQ/QP/SRQ state.
 */
/* ARGSUSED */
static int
hermon_devmap_umem_dup(devmap_cookie_t dhp, void *pvtp, devmap_cookie_t new_dhp,
    void **new_pvtp)
{
	hermon_state_t		*state;
	hermon_devmap_track_t	*dvm_track, *new_dvm_track;
	uint_t			maxprot;
	int			status;

	/*
	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
	 * structure" (in "pvtp").
	 */
	dvm_track = (hermon_devmap_track_t *)pvtp;
	state = dvm_track->hdt_state;

	/*
	 * Since this devmap_dup() entry point is generally called
	 * when a process does fork(2), it is incumbent upon the driver
	 * to insure that the child does not inherit a valid copy of
	 * the parent's QP or CQ resource. This is accomplished by using
	 * devmap_devmem_remap() to invalidate the child's mapping to the
	 * kernel memory.
	 */
	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
	status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
	    dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
	if (status != DDI_SUCCESS) {
		HERMON_WARNING(state, "failed in hermon_devmap_umem_dup()");
		return (status);
	}

	/*
	 * Allocate a new entry to track the subsequent unmapping
	 * (specifically, all partial unmappings) of the child's newly
	 * invalidated resource. Note: Setting the "hdt_size" field to
	 * zero here is an indication to the devmap_unmap() entry point
	 * that this mapping is invalid, and that its subsequent unmapping
	 * should not affect any of the parent's CQ or QP resources.
	 */
	new_dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
	    sizeof (hermon_devmap_track_t), KM_SLEEP);
	new_dvm_track->hdt_offset = 0;
	new_dvm_track->hdt_state = state;
	new_dvm_track->hdt_refcnt = 1;
	new_dvm_track->hdt_size = 0;
	mutex_init(&new_dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(state->hs_intrmsi_pri));
	*new_pvtp = new_dvm_track;

	return (DDI_SUCCESS);
}
649 646
650 647
/*
 * hermon_devmap_umem_unmap()
 *    Context: Can be called from kernel context.
 *
 * devmap "unmap" callback for CQ/QP/SRQ queue memory.  Handles full and
 * partial unmappings via a reference count in the tracking structure;
 * when the last piece is unmapped, clears the umap handle recorded in
 * the corresponding CQ/QP/SRQ (if that resource still exists in the
 * userland resources database).
 */
/* ARGSUSED */
static void
hermon_devmap_umem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
    devmap_cookie_t new_dhp2, void **pvtp2)
{
	hermon_state_t		*state;
	hermon_rsrc_t		*rsrcp;
	hermon_devmap_track_t	*dvm_track;
	hermon_cqhdl_t		cq;
	hermon_qphdl_t		qp;
	hermon_srqhdl_t		srq;
	uint64_t		key, value;
	uint_t			type;
	uint_t			size;
	int			status;

	/*
	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
	 * structure" (in "pvtp").
	 */
	dvm_track = (hermon_devmap_track_t *)pvtp;
	state = dvm_track->hdt_state;

	/*
	 * Extract the "offset" from the "Hermon devmap tracking structure".
	 * Note: The input argument "off" is ignored here because the
	 * Hermon mapping interfaces define a very specific meaning to
	 * each "logical offset". Also extract the "key" and "type" encoded
	 * in the logical offset.
	 */
	key = dvm_track->hdt_offset >> PAGESHIFT;
	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
	key = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;

	/*
	 * Extract the "size" of the mapping. If this size is determined
	 * to be zero, then it is an indication of a previously invalidated
	 * mapping, and no CQ or QP resources should be affected.
	 */
	size = dvm_track->hdt_size;

	/*
	 * If only the "middle" portion of a given mapping is being unmapped,
	 * then we are effectively creating one new piece of mapped memory.
	 * (Original region is divided into three pieces of which the middle
	 * piece is being removed. This leaves two pieces. Since we started
	 * with one piece and now have two pieces, we need to increment the
	 * counter in the "Hermon devmap tracking structure".
	 *
	 * If, however, the whole mapped region is being unmapped, then we
	 * have started with one region which we are completely removing.
	 * In this case, we need to decrement the counter in the "Hermon
	 * devmap tracking structure".
	 *
	 * In each of the remaining cases, we will have started with one
	 * mapped region and ended with one (different) region. So no counter
	 * modification is necessary.
	 */
	mutex_enter(&dvm_track->hdt_lock);
	if ((new_dhp1 == NULL) && (new_dhp2 == NULL)) {
		dvm_track->hdt_refcnt--;
	} else if ((new_dhp1 != NULL) && (new_dhp2 != NULL)) {
		dvm_track->hdt_refcnt++;
	}
	mutex_exit(&dvm_track->hdt_lock);

	/*
	 * For each of the cases where the region is being divided, then we
	 * need to pass back the "Hermon devmap tracking structure". This way
	 * we get it back when each of the remaining pieces is subsequently
	 * unmapped.
	 */
	if (new_dhp1 != NULL) {
		*pvtp1 = pvtp;
	}
	if (new_dhp2 != NULL) {
		*pvtp2 = pvtp;
	}

	/*
	 * If the "Hermon devmap tracking structure" is no longer being
	 * referenced, then free it up. Otherwise, return.
	 */
	if (dvm_track->hdt_refcnt == 0) {
		mutex_destroy(&dvm_track->hdt_lock);
		kmem_free(dvm_track, sizeof (hermon_devmap_track_t));

		/*
		 * If the mapping was invalid (see explanation above), then
		 * no further processing is necessary.
		 */
		if (size == 0) {
			return;
		}
	} else {
		return;
	}

	/*
	 * Now that we can guarantee that the user memory is fully unmapped,
	 * we can use the "key" and "type" values to try to find the entry
	 * in the "userland resources database". If it's found, then it
	 * indicates that the queue memory (CQ or QP) has not yet been freed.
	 * In this case, we update the corresponding CQ or QP handle to
	 * indicate that the "devmap_devmem_remap()" call will be unnecessary.
	 * If it's _not_ found, then it indicates that the CQ or QP memory
	 * was, in fact, freed before it was unmapped (thus requiring a
	 * previous invalidation by remapping - which will already have
	 * been done in the free routine).
	 */
	status = hermon_umap_db_find(state->hs_instance, key, type, &value,
	    0, NULL);
	if (status == DDI_SUCCESS) {
		/*
		 * Depending on the type of the mapped resource (CQ or QP),
		 * update handle to indicate that no invalidation remapping
		 * will be necessary.
		 */
		if (type == MLNX_UMAP_CQMEM_RSRC) {

			/* Use "value" to convert to CQ handle */
			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
			cq = (hermon_cqhdl_t)rsrcp->hr_addr;

			/*
			 * Invalidate the handle to the userland mapping.
			 * Note: We must ensure that the mapping being
			 * unmapped here is the current one for the CQ. It
			 * is possible that it might not be if this CQ has
			 * been resized and the previous CQ memory has not
			 * yet been unmapped. But in that case, because of
			 * the devmap_devmem_remap(), there is no longer any
			 * association between the mapping and the real CQ
			 * kernel memory.
			 */
			mutex_enter(&cq->cq_lock);
			if (cq->cq_umap_dhp == dhp) {
				cq->cq_umap_dhp = NULL;
				if (cq->cq_resize_hdl) {
					/* resize is DONE, switch queues */
					hermon_cq_resize_helper(state, cq);
				}
			} else {
				if (cq->cq_resize_hdl &&
				    cq->cq_resize_hdl->cq_umap_dhp == dhp) {
					/*
					 * Unexpected case. munmap of the
					 * cq_resize buf, and not the
					 * original buf.
					 */
					cq->cq_resize_hdl->cq_umap_dhp = NULL;
				}
			}
			mutex_exit(&cq->cq_lock);

		} else if (type == MLNX_UMAP_QPMEM_RSRC) {

			/* Use "value" to convert to QP handle */
			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
			qp = (hermon_qphdl_t)rsrcp->hr_addr;

			/*
			 * Invalidate the handle to the userland mapping.
			 * Note: we ensure that the mapping being unmapped
			 * here is the current one for the QP. This is
			 * more of a sanity check here since, unlike CQs
			 * (above) we do not support resize of QPs.
			 */
			mutex_enter(&qp->qp_lock);
			if (qp->qp_umap_dhp == dhp) {
				qp->qp_umap_dhp = NULL;
			}
			mutex_exit(&qp->qp_lock);

		} else if (type == MLNX_UMAP_SRQMEM_RSRC) {

			/* Use "value" to convert to SRQ handle */
			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
			srq = (hermon_srqhdl_t)rsrcp->hr_addr;

			/*
			 * Invalidate the handle to the userland mapping.
			 * Note: we ensure that the mapping being unmapped
			 * here is the current one for the SRQ. This is
			 * more of a sanity check here since, unlike CQs
			 * (above) we do not support resize of SRQ mappings.
			 */
			mutex_enter(&srq->srq_lock);
			if (srq->srq_umap_dhp == dhp) {
				srq->srq_umap_dhp = NULL;
			}
			mutex_exit(&srq->srq_lock);
		}
	}
}
852 848
853 849
/*
 * hermon_devmap_dbrecmem_map()
 *    Context: Can be called from kernel context.
 */
858 854 /* ARGSUSED */
859 855 static int
860 856 hermon_devmap_dbrecmem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
861 857 offset_t off, size_t len, void **pvtp)
862 858 {
863 859 hermon_state_t *state;
864 860 hermon_devmap_track_t *dvm_track;
865 861 hermon_cqhdl_t cq;
866 862 hermon_qphdl_t qp;
867 863 hermon_srqhdl_t srq;
868 864 minor_t instance;
869 865 uint64_t key;
870 866 uint_t type;
871 867
872 868 /* Get Hermon softstate structure from instance */
873 869 instance = HERMON_DEV_INSTANCE(dev);
874 870 state = ddi_get_soft_state(hermon_statep, instance);
875 871 if (state == NULL) {
876 872 return (ENXIO);
877 873 }
878 874
879 875 /*
880 876 * The bottom bits of "offset" are undefined (number depends on
881 877 * system PAGESIZE). Shifting these off leaves us with a "key".
882 878 * The "key" is actually a combination of both a real key value
883 879 * (for the purpose of database lookup) and a "type" value. Although
884 880 * we are not going to do any database lookup per se, we do want
885 881 * to extract the "key" and the "type" (to enable faster lookup of
886 882 * the appropriate CQ or QP handle).
887 883 */
↓ open down ↓ |
200 lines elided |
↑ open up ↑ |
888 884 key = off >> PAGESHIFT;
889 885 type = key & MLNX_UMAP_RSRC_TYPE_MASK;
890 886 key = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
891 887
892 888 /*
893 889 * Allocate an entry to track the mapping and unmapping (specifically,
894 890 * partial unmapping) of this resource.
895 891 */
896 892 dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
897 893 sizeof (hermon_devmap_track_t), KM_SLEEP);
898 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
899 894 dvm_track->hdt_offset = off;
900 895 dvm_track->hdt_state = state;
901 896 dvm_track->hdt_refcnt = 1;
902 897 mutex_init(&dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
903 898 DDI_INTR_PRI(state->hs_intrmsi_pri));
904 899
905 900 /*
906 901 * Depending of the type of resource that has been mapped out, we
907 902 * need to update the QP or CQ handle to reflect that it has, in
908 903 * fact, been mapped. This allows the driver code which frees a QP
909 904 * or a CQ to know whether it is appropriate to do a
910 905 * devmap_devmem_remap() to invalidate the userland mapping for the
911 906 * corresponding queue's memory.
912 907 */
913 908 if (type == MLNX_UMAP_CQMEM_RSRC) {
914 909
915 910 /* Use "key" (CQ number) to do fast lookup of CQ handle */
916 911 cq = hermon_cqhdl_from_cqnum(state, key);
917 912
918 913 /*
919 914 * Update the handle to the userland mapping. Note: If
920 915 * the CQ already has a valid userland mapping, then stop
921 916 * and return failure.
922 917 */
923 918 mutex_enter(&cq->cq_lock);
924 919 if (cq->cq_umap_dhp == NULL) {
925 920 cq->cq_umap_dhp = dhp;
926 921 dvm_track->hdt_size = cq->cq_cqinfo.qa_size;
927 922 mutex_exit(&cq->cq_lock);
928 923 } else {
929 924 mutex_exit(&cq->cq_lock);
930 925 goto umem_map_fail;
931 926 }
932 927
933 928 } else if (type == MLNX_UMAP_QPMEM_RSRC) {
934 929
935 930 /* Use "key" (QP number) to do fast lookup of QP handle */
936 931 qp = hermon_qphdl_from_qpnum(state, key);
937 932
938 933 /*
939 934 * Update the handle to the userland mapping. Note: If
940 935 * the CQ already has a valid userland mapping, then stop
941 936 * and return failure.
942 937 */
943 938 mutex_enter(&qp->qp_lock);
944 939 if (qp->qp_umap_dhp == NULL) {
945 940 qp->qp_umap_dhp = dhp;
946 941 dvm_track->hdt_size = qp->qp_wqinfo.qa_size;
947 942 mutex_exit(&qp->qp_lock);
948 943 } else {
949 944 mutex_exit(&qp->qp_lock);
950 945 goto umem_map_fail;
951 946 }
952 947
953 948 } else if (type == MLNX_UMAP_SRQMEM_RSRC) {
954 949
955 950 /* Use "key" (SRQ number) to do fast lookup on SRQ handle */
956 951 srq = hermon_srqhdl_from_srqnum(state, key);
957 952
958 953 /*
959 954 * Update the handle to the userland mapping. Note: If the
960 955 * SRQ already has a valid userland mapping, then stop and
961 956 * return failure.
962 957 */
963 958 mutex_enter(&srq->srq_lock);
964 959 if (srq->srq_umap_dhp == NULL) {
965 960 srq->srq_umap_dhp = dhp;
966 961 dvm_track->hdt_size = srq->srq_wqinfo.qa_size;
967 962 mutex_exit(&srq->srq_lock);
968 963 } else {
969 964 mutex_exit(&srq->srq_lock);
970 965 goto umem_map_fail;
971 966 }
972 967 }
973 968
974 969 /*
975 970 * Pass the private "Hermon devmap tracking structure" back. This
976 971 * pointer will be returned in subsequent "unmap" callbacks.
977 972 */
978 973 *pvtp = dvm_track;
979 974
980 975 return (DDI_SUCCESS);
981 976
982 977 umem_map_fail:
983 978 mutex_destroy(&dvm_track->hdt_lock);
984 979 kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
985 980 return (DDI_FAILURE);
986 981 }
987 982
988 983
989 984 /*
990 985 * hermon_devmap_dbrecmem_dup()
991 986 * Context: Can be called from kernel context.
992 987 */
993 988 /* ARGSUSED */
994 989 static int
995 990 hermon_devmap_dbrecmem_dup(devmap_cookie_t dhp, void *pvtp,
996 991 devmap_cookie_t new_dhp, void **new_pvtp)
997 992 {
↓ open down ↓ |
89 lines elided |
↑ open up ↑ |
998 993 hermon_state_t *state;
999 994 hermon_devmap_track_t *dvm_track, *new_dvm_track;
1000 995 uint_t maxprot;
1001 996 int status;
1002 997
1003 998 /*
1004 999 * Extract the Hermon softstate pointer from "Hermon devmap tracking
1005 1000 * structure" (in "pvtp").
1006 1001 */
1007 1002 dvm_track = (hermon_devmap_track_t *)pvtp;
1008 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1009 1003 state = dvm_track->hdt_state;
1010 1004
1011 1005 /*
1012 1006 * Since this devmap_dup() entry point is generally called
1013 1007 * when a process does fork(2), it is incumbent upon the driver
1014 1008 * to insure that the child does not inherit a valid copy of
1015 1009 * the parent's QP or CQ resource. This is accomplished by using
1016 1010 * devmap_devmem_remap() to invalidate the child's mapping to the
1017 1011 * kernel memory.
1018 1012 */
1019 1013 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
1020 1014 status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
1021 1015 dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
1022 1016 if (status != DDI_SUCCESS) {
1023 1017 HERMON_WARNING(state, "failed in hermon_devmap_dbrecmem_dup()");
1024 1018 return (status);
1025 1019 }
1026 1020
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
1027 1021 /*
1028 1022 * Allocate a new entry to track the subsequent unmapping
1029 1023 * (specifically, all partial unmappings) of the child's newly
1030 1024 * invalidated resource. Note: Setting the "hdt_size" field to
1031 1025 * zero here is an indication to the devmap_unmap() entry point
1032 1026 * that this mapping is invalid, and that its subsequent unmapping
1033 1027 * should not affect any of the parent's CQ or QP resources.
1034 1028 */
1035 1029 new_dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
1036 1030 sizeof (hermon_devmap_track_t), KM_SLEEP);
1037 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*new_dvm_track))
1038 1031 new_dvm_track->hdt_offset = 0;
1039 1032 new_dvm_track->hdt_state = state;
1040 1033 new_dvm_track->hdt_refcnt = 1;
1041 1034 new_dvm_track->hdt_size = 0;
1042 1035 mutex_init(&new_dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
1043 1036 DDI_INTR_PRI(state->hs_intrmsi_pri));
1044 1037 *new_pvtp = new_dvm_track;
1045 1038
1046 1039 return (DDI_SUCCESS);
1047 1040 }
1048 1041
1049 1042
1050 1043 /*
1051 1044 * hermon_devmap_dbrecmem_unmap()
1052 1045 * Context: Can be called from kernel context.
1053 1046 */
1054 1047 /* ARGSUSED */
1055 1048 static void
1056 1049 hermon_devmap_dbrecmem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
1057 1050 size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
1058 1051 devmap_cookie_t new_dhp2, void **pvtp2)
1059 1052 {
1060 1053 hermon_state_t *state;
1061 1054 hermon_rsrc_t *rsrcp;
1062 1055 hermon_devmap_track_t *dvm_track;
1063 1056 hermon_cqhdl_t cq;
1064 1057 hermon_qphdl_t qp;
1065 1058 hermon_srqhdl_t srq;
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
1066 1059 uint64_t key, value;
1067 1060 uint_t type;
1068 1061 uint_t size;
1069 1062 int status;
1070 1063
1071 1064 /*
1072 1065 * Extract the Hermon softstate pointer from "Hermon devmap tracking
1073 1066 * structure" (in "pvtp").
1074 1067 */
1075 1068 dvm_track = (hermon_devmap_track_t *)pvtp;
1076 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1077 1069 state = dvm_track->hdt_state;
1078 1070
1079 1071 /*
1080 1072 * Extract the "offset" from the "Hermon devmap tracking structure".
1081 1073 * Note: The input argument "off" is ignored here because the
1082 1074 * Hermon mapping interfaces define a very specific meaning to
1083 1075 * each "logical offset". Also extract the "key" and "type" encoded
1084 1076 * in the logical offset.
1085 1077 */
1086 1078 key = dvm_track->hdt_offset >> PAGESHIFT;
1087 1079 type = key & MLNX_UMAP_RSRC_TYPE_MASK;
1088 1080 key = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
1089 1081
1090 1082 /*
1091 1083 * Extract the "size" of the mapping. If this size is determined
1092 1084 * to be zero, then it is an indication of a previously invalidated
1093 1085 * mapping, and no CQ or QP resources should be affected.
1094 1086 */
1095 1087 size = dvm_track->hdt_size;
1096 1088
1097 1089 /*
1098 1090 * If only the "middle portion of a given mapping is being unmapped,
1099 1091 * then we are effectively creating one new piece of mapped memory.
1100 1092 * (Original region is divided into three pieces of which the middle
1101 1093 * piece is being removed. This leaves two pieces. Since we started
1102 1094 * with one piece and now have two pieces, we need to increment the
1103 1095 * counter in the "Hermon devmap tracking structure".
1104 1096 *
1105 1097 * If, however, the whole mapped region is being unmapped, then we
1106 1098 * have started with one region which we are completely removing.
1107 1099 * In this case, we need to decrement the counter in the "Hermon
1108 1100 * devmap tracking structure".
1109 1101 *
1110 1102 * In each of the remaining cases, we will have started with one
1111 1103 * mapped region and ended with one (different) region. So no counter
1112 1104 * modification is necessary.
1113 1105 */
1114 1106 mutex_enter(&dvm_track->hdt_lock);
1115 1107 if ((new_dhp1 == NULL) && (new_dhp2 == NULL)) {
1116 1108 dvm_track->hdt_refcnt--;
1117 1109 } else if ((new_dhp1 != NULL) && (new_dhp2 != NULL)) {
1118 1110 dvm_track->hdt_refcnt++;
1119 1111 }
1120 1112 mutex_exit(&dvm_track->hdt_lock);
1121 1113
1122 1114 /*
1123 1115 * For each of the cases where the region is being divided, then we
1124 1116 * need to pass back the "Hermon devmap tracking structure". This way
1125 1117 * we get it back when each of the remaining pieces is subsequently
1126 1118 * unmapped.
1127 1119 */
1128 1120 if (new_dhp1 != NULL) {
1129 1121 *pvtp1 = pvtp;
1130 1122 }
1131 1123 if (new_dhp2 != NULL) {
1132 1124 *pvtp2 = pvtp;
1133 1125 }
1134 1126
1135 1127 /*
1136 1128 * If the "Hermon devmap tracking structure" is no longer being
1137 1129 * referenced, then free it up. Otherwise, return.
1138 1130 */
1139 1131 if (dvm_track->hdt_refcnt == 0) {
1140 1132 mutex_destroy(&dvm_track->hdt_lock);
1141 1133 kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
1142 1134
1143 1135 /*
1144 1136 * If the mapping was invalid (see explanation above), then
1145 1137 * no further processing is necessary.
1146 1138 */
1147 1139 if (size == 0) {
1148 1140 return;
1149 1141 }
1150 1142 } else {
1151 1143 return;
1152 1144 }
1153 1145
1154 1146 /*
1155 1147 * Now that we can guarantee that the user memory is fully unmapped,
1156 1148 * we can use the "key" and "type" values to try to find the entry
1157 1149 * in the "userland resources database". If it's found, then it
1158 1150 * indicates that the queue memory (CQ or QP) has not yet been freed.
1159 1151 * In this case, we update the corresponding CQ or QP handle to
1160 1152 * indicate that the "devmap_devmem_remap()" call will be unnecessary.
1161 1153 * If it's _not_ found, then it indicates that the CQ or QP memory
1162 1154 * was, in fact, freed before it was unmapped (thus requiring a
1163 1155 * previous invalidation by remapping - which will already have
1164 1156 * been done in the free routine).
1165 1157 */
1166 1158 status = hermon_umap_db_find(state->hs_instance, key, type, &value,
1167 1159 0, NULL);
1168 1160 if (status == DDI_SUCCESS) {
1169 1161 /*
1170 1162 * Depending on the type of the mapped resource (CQ or QP),
1171 1163 * update handle to indicate that no invalidation remapping
1172 1164 * will be necessary.
1173 1165 */
1174 1166 if (type == MLNX_UMAP_CQMEM_RSRC) {
1175 1167
1176 1168 /* Use "value" to convert to CQ handle */
1177 1169 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1178 1170 cq = (hermon_cqhdl_t)rsrcp->hr_addr;
1179 1171
1180 1172 /*
1181 1173 * Invalidate the handle to the userland mapping.
1182 1174 * Note: We must ensure that the mapping being
1183 1175 * unmapped here is the current one for the CQ. It
1184 1176 * is possible that it might not be if this CQ has
1185 1177 * been resized and the previous CQ memory has not
1186 1178 * yet been unmapped. But in that case, because of
1187 1179 * the devmap_devmem_remap(), there is no longer any
1188 1180 * association between the mapping and the real CQ
1189 1181 * kernel memory.
1190 1182 */
1191 1183 mutex_enter(&cq->cq_lock);
1192 1184 if (cq->cq_umap_dhp == dhp) {
1193 1185 cq->cq_umap_dhp = NULL;
1194 1186 }
1195 1187 mutex_exit(&cq->cq_lock);
1196 1188
1197 1189 } else if (type == MLNX_UMAP_QPMEM_RSRC) {
1198 1190
1199 1191 /* Use "value" to convert to QP handle */
1200 1192 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1201 1193 qp = (hermon_qphdl_t)rsrcp->hr_addr;
1202 1194
1203 1195 /*
1204 1196 * Invalidate the handle to the userland mapping.
1205 1197 * Note: we ensure that the mapping being unmapped
1206 1198 * here is the current one for the QP. This is
1207 1199 * more of a sanity check here since, unlike CQs
1208 1200 * (above) we do not support resize of QPs.
1209 1201 */
1210 1202 mutex_enter(&qp->qp_lock);
1211 1203 if (qp->qp_umap_dhp == dhp) {
1212 1204 qp->qp_umap_dhp = NULL;
1213 1205 }
1214 1206 mutex_exit(&qp->qp_lock);
1215 1207
1216 1208 } else if (type == MLNX_UMAP_SRQMEM_RSRC) {
1217 1209
1218 1210 /* Use "value" to convert to SRQ handle */
1219 1211 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1220 1212 srq = (hermon_srqhdl_t)rsrcp->hr_addr;
1221 1213
1222 1214 /*
1223 1215 * Invalidate the handle to the userland mapping.
1224 1216 * Note: we ensure that the mapping being unmapped
1225 1217 * here is the current one for the QP. This is
1226 1218 * more of a sanity check here since, unlike CQs
1227 1219 * (above) we do not support resize of QPs.
1228 1220 */
1229 1221 mutex_enter(&srq->srq_lock);
1230 1222 if (srq->srq_umap_dhp == dhp) {
1231 1223 srq->srq_umap_dhp = NULL;
1232 1224 }
1233 1225 mutex_exit(&srq->srq_lock);
1234 1226 }
1235 1227 }
1236 1228 }
1237 1229
1238 1230
1239 1231 /*
1240 1232 * hermon_devmap_devmem_map()
1241 1233 * Context: Can be called from kernel context.
1242 1234 */
1243 1235 /* ARGSUSED */
1244 1236 static int
1245 1237 hermon_devmap_devmem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
1246 1238 offset_t off, size_t len, void **pvtp)
1247 1239 {
1248 1240 hermon_state_t *state;
1249 1241 hermon_devmap_track_t *dvm_track;
1250 1242 minor_t instance;
1251 1243
1252 1244 /* Get Hermon softstate structure from instance */
1253 1245 instance = HERMON_DEV_INSTANCE(dev);
1254 1246 state = ddi_get_soft_state(hermon_statep, instance);
1255 1247 if (state == NULL) {
1256 1248 return (ENXIO);
1257 1249 }
1258 1250
↓ open down ↓ |
172 lines elided |
↑ open up ↑ |
1259 1251 /*
1260 1252 * Allocate an entry to track the mapping and unmapping of this
1261 1253 * resource. Note: We don't need to initialize the "refcnt" or
1262 1254 * "offset" fields here, nor do we need to initialize the mutex
1263 1255 * used with the "refcnt". Since UAR pages are single pages, they
1264 1256 * are not subject to "partial" unmappings. This makes these other
1265 1257 * fields unnecessary.
1266 1258 */
1267 1259 dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
1268 1260 sizeof (hermon_devmap_track_t), KM_SLEEP);
1269 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1270 1261 dvm_track->hdt_state = state;
1271 1262 dvm_track->hdt_size = (uint_t)PAGESIZE;
1272 1263
1273 1264 /*
1274 1265 * Pass the private "Hermon devmap tracking structure" back. This
1275 1266 * pointer will be returned in a subsequent "unmap" callback.
1276 1267 */
1277 1268 *pvtp = dvm_track;
1278 1269
1279 1270 return (DDI_SUCCESS);
1280 1271 }
1281 1272
1282 1273
1283 1274 /*
1284 1275 * hermon_devmap_devmem_dup()
1285 1276 * Context: Can be called from kernel context.
1286 1277 */
1287 1278 /* ARGSUSED */
1288 1279 static int
1289 1280 hermon_devmap_devmem_dup(devmap_cookie_t dhp, void *pvtp,
1290 1281 devmap_cookie_t new_dhp, void **new_pvtp)
1291 1282 {
1292 1283 hermon_state_t *state;
1293 1284 hermon_devmap_track_t *dvm_track;
1294 1285 uint_t maxprot;
1295 1286 int status;
1296 1287
1297 1288 /*
1298 1289 * Extract the Hermon softstate pointer from "Hermon devmap tracking
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
1299 1290 * structure" (in "pvtp"). Note: If the tracking structure is NULL
1300 1291 * here, it means that the mapping corresponds to an invalid mapping.
1301 1292 * In this case, it can be safely ignored ("new_pvtp" set to NULL).
1302 1293 */
1303 1294 dvm_track = (hermon_devmap_track_t *)pvtp;
1304 1295 if (dvm_track == NULL) {
1305 1296 *new_pvtp = NULL;
1306 1297 return (DDI_SUCCESS);
1307 1298 }
1308 1299
1309 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1310 1300 state = dvm_track->hdt_state;
1311 1301
1312 1302 /*
1313 1303 * Since this devmap_dup() entry point is generally called
1314 1304 * when a process does fork(2), it is incumbent upon the driver
1315 1305 * to insure that the child does not inherit a valid copy of
1316 1306 * the parent's resource. This is accomplished by using
1317 1307 * devmap_devmem_remap() to invalidate the child's mapping to the
1318 1308 * kernel memory.
1319 1309 */
1320 1310 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
1321 1311 status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
1322 1312 dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
1323 1313 if (status != DDI_SUCCESS) {
1324 1314 HERMON_WARNING(state, "failed in hermon_devmap_devmem_dup()");
1325 1315 return (status);
1326 1316 }
1327 1317
1328 1318 /*
1329 1319 * Since the region is invalid, there is no need for us to
1330 1320 * allocate and continue to track an additional "Hermon devmap
1331 1321 * tracking structure". Instead we return NULL here, which is an
1332 1322 * indication to the devmap_unmap() entry point that this entry
1333 1323 * can be safely ignored.
1334 1324 */
1335 1325 *new_pvtp = NULL;
1336 1326
1337 1327 return (DDI_SUCCESS);
1338 1328 }
1339 1329
1340 1330
1341 1331 /*
1342 1332 * hermon_devmap_devmem_unmap()
1343 1333 * Context: Can be called from kernel context.
1344 1334 */
1345 1335 /* ARGSUSED */
1346 1336 static void
1347 1337 hermon_devmap_devmem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
1348 1338 size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
1349 1339 devmap_cookie_t new_dhp2, void **pvtp2)
1350 1340 {
↓ open down ↓ |
31 lines elided |
↑ open up ↑ |
1351 1341 hermon_devmap_track_t *dvm_track;
1352 1342
1353 1343 /*
1354 1344 * Free up the "Hermon devmap tracking structure" (in "pvtp").
1355 1345 * There cannot be "partial" unmappings here because all UAR pages
1356 1346 * are single pages. Note: If the tracking structure is NULL here,
1357 1347 * it means that the mapping corresponds to an invalid mapping. In
1358 1348 * this case, it can be safely ignored.
1359 1349 */
1360 1350 dvm_track = (hermon_devmap_track_t *)pvtp;
1361 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1362 1351 if (dvm_track == NULL) {
1363 1352 return;
1364 1353 }
1365 1354
1366 1355 kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
1367 1356 }
1368 1357
1369 1358
1370 1359 /*
1371 1360 * hermon_umap_ci_data_in()
1372 1361 * Context: Can be called from user or kernel context.
1373 1362 */
1374 1363 /* ARGSUSED */
1375 1364 ibt_status_t
1376 1365 hermon_umap_ci_data_in(hermon_state_t *state, ibt_ci_data_flags_t flags,
1377 1366 ibt_object_type_t object, void *hdl, void *data_p, size_t data_sz)
1378 1367 {
1379 1368 int status;
1380 1369
1381 1370 /*
1382 1371 * Depending on the type of object about which additional information
1383 1372 * is being provided (currently only MR is supported), we call the
1384 1373 * appropriate resource-specific function.
1385 1374 */
1386 1375 switch (object) {
1387 1376 case IBT_HDL_MR:
1388 1377 status = hermon_umap_mr_data_in((hermon_mrhdl_t)hdl,
1389 1378 (ibt_mr_data_in_t *)data_p, data_sz);
1390 1379 if (status != DDI_SUCCESS) {
1391 1380 return (status);
1392 1381 }
1393 1382 break;
1394 1383
1395 1384 /*
1396 1385 * For other possible valid IBT types, we return IBT_NOT_SUPPORTED,
1397 1386 * since the Hermon driver does not support these.
1398 1387 */
1399 1388 case IBT_HDL_HCA:
1400 1389 case IBT_HDL_QP:
1401 1390 case IBT_HDL_CQ:
1402 1391 case IBT_HDL_PD:
1403 1392 case IBT_HDL_MW:
1404 1393 case IBT_HDL_AH:
1405 1394 case IBT_HDL_SCHED:
1406 1395 case IBT_HDL_EEC:
1407 1396 case IBT_HDL_RDD:
1408 1397 case IBT_HDL_SRQ:
1409 1398 return (IBT_NOT_SUPPORTED);
1410 1399
1411 1400 /*
1412 1401 * Any other types are invalid.
1413 1402 */
1414 1403 default:
1415 1404 return (IBT_INVALID_PARAM);
1416 1405 }
1417 1406
1418 1407 return (DDI_SUCCESS);
1419 1408 }
1420 1409
1421 1410
1422 1411 /*
1423 1412 * hermon_umap_mr_data_in()
1424 1413 * Context: Can be called from user or kernel context.
1425 1414 */
1426 1415 static ibt_status_t
1427 1416 hermon_umap_mr_data_in(hermon_mrhdl_t mr, ibt_mr_data_in_t *data,
1428 1417 size_t data_sz)
1429 1418 {
1430 1419 if (data->mr_rev != IBT_MR_DATA_IN_IF_VERSION) {
1431 1420 return (IBT_NOT_SUPPORTED);
1432 1421 }
↓ open down ↓ |
61 lines elided |
↑ open up ↑ |
1433 1422
1434 1423 /* Check for valid MR handle pointer */
1435 1424 if (mr == NULL) {
1436 1425 return (IBT_MR_HDL_INVALID);
1437 1426 }
1438 1427
1439 1428 /* Check for valid MR input structure size */
1440 1429 if (data_sz < sizeof (ibt_mr_data_in_t)) {
1441 1430 return (IBT_INSUFF_RESOURCE);
1442 1431 }
1443 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1444 1432
1445 1433 /*
1446 1434 * Ensure that the MR corresponds to userland memory and that it is
1447 1435 * a currently valid memory region as well.
1448 1436 */
1449 1437 mutex_enter(&mr->mr_lock);
1450 1438 if ((mr->mr_is_umem == 0) || (mr->mr_umemcookie == NULL)) {
1451 1439 mutex_exit(&mr->mr_lock);
1452 1440 return (IBT_MR_HDL_INVALID);
1453 1441 }
1454 1442
1455 1443 /*
1456 1444 * If it has passed all the above checks, then extract the callback
1457 1445 * function and argument from the input structure. Copy them into
1458 1446 * the MR handle. This function will be called only if the memory
1459 1447 * corresponding to the MR handle gets a umem_lockmemory() callback.
1460 1448 */
1461 1449 mr->mr_umem_cbfunc = data->mr_func;
1462 1450 mr->mr_umem_cbarg1 = data->mr_arg1;
1463 1451 mr->mr_umem_cbarg2 = data->mr_arg2;
1464 1452 mutex_exit(&mr->mr_lock);
1465 1453
1466 1454 return (DDI_SUCCESS);
1467 1455 }
1468 1456
1469 1457
1470 1458 /*
1471 1459 * hermon_umap_ci_data_out()
1472 1460 * Context: Can be called from user or kernel context.
1473 1461 */
1474 1462 /* ARGSUSED */
1475 1463 ibt_status_t
1476 1464 hermon_umap_ci_data_out(hermon_state_t *state, ibt_ci_data_flags_t flags,
1477 1465 ibt_object_type_t object, void *hdl, void *data_p, size_t data_sz)
1478 1466 {
1479 1467 int status;
1480 1468
1481 1469 /*
1482 1470 * Depending on the type of object about which additional information
1483 1471 * is being requested (CQ or QP), we call the appropriate resource-
1484 1472 * specific mapping function.
1485 1473 */
1486 1474 switch (object) {
1487 1475 case IBT_HDL_CQ:
1488 1476 status = hermon_umap_cq_data_out((hermon_cqhdl_t)hdl,
1489 1477 (mlnx_umap_cq_data_out_t *)data_p, data_sz);
1490 1478 if (status != DDI_SUCCESS) {
1491 1479 return (status);
1492 1480 }
1493 1481 break;
1494 1482
1495 1483 case IBT_HDL_QP:
1496 1484 status = hermon_umap_qp_data_out((hermon_qphdl_t)hdl,
1497 1485 (mlnx_umap_qp_data_out_t *)data_p, data_sz);
1498 1486 if (status != DDI_SUCCESS) {
1499 1487 return (status);
1500 1488 }
1501 1489 break;
1502 1490
1503 1491 case IBT_HDL_SRQ:
1504 1492 status = hermon_umap_srq_data_out((hermon_srqhdl_t)hdl,
1505 1493 (mlnx_umap_srq_data_out_t *)data_p, data_sz);
1506 1494 if (status != DDI_SUCCESS) {
1507 1495 return (status);
1508 1496 }
1509 1497 break;
1510 1498
1511 1499 case IBT_HDL_PD:
1512 1500 status = hermon_umap_pd_data_out((hermon_pdhdl_t)hdl,
1513 1501 (mlnx_umap_pd_data_out_t *)data_p, data_sz);
1514 1502 if (status != DDI_SUCCESS) {
1515 1503 return (status);
1516 1504 }
1517 1505 break;
1518 1506
1519 1507 /*
1520 1508 * For other possible valid IBT types, we return IBT_NOT_SUPPORTED,
1521 1509 * since the Hermon driver does not support these.
1522 1510 */
1523 1511 case IBT_HDL_HCA:
1524 1512 case IBT_HDL_MR:
1525 1513 case IBT_HDL_MW:
1526 1514 case IBT_HDL_AH:
1527 1515 case IBT_HDL_SCHED:
1528 1516 case IBT_HDL_EEC:
1529 1517 case IBT_HDL_RDD:
1530 1518 return (IBT_NOT_SUPPORTED);
1531 1519
1532 1520 /*
1533 1521 * Any other types are invalid.
1534 1522 */
1535 1523 default:
1536 1524 return (IBT_INVALID_PARAM);
1537 1525 }
1538 1526
1539 1527 return (DDI_SUCCESS);
1540 1528 }
1541 1529
1542 1530
1543 1531 /*
1544 1532 * hermon_umap_cq_data_out()
1545 1533 * Context: Can be called from user or kernel context.
1546 1534 */
1547 1535 static ibt_status_t
1548 1536 hermon_umap_cq_data_out(hermon_cqhdl_t cq, mlnx_umap_cq_data_out_t *data,
1549 1537 size_t data_sz)
↓ open down ↓ |
96 lines elided |
↑ open up ↑ |
1550 1538 {
1551 1539 /* Check for valid CQ handle pointer */
1552 1540 if (cq == NULL) {
1553 1541 return (IBT_CQ_HDL_INVALID);
1554 1542 }
1555 1543
1556 1544 /* Check for valid CQ mapping structure size */
1557 1545 if (data_sz < sizeof (mlnx_umap_cq_data_out_t)) {
1558 1546 return (IBT_INSUFF_RESOURCE);
1559 1547 }
1560 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1561 1548
1562 1549 /* deal with cq_alloc() verses cq_resize() */
1563 1550 if (cq->cq_resize_hdl) {
1564 1551 data->mcq_maplen = cq->cq_resize_hdl->cq_cqinfo.qa_size;
1565 1552 data->mcq_numcqe = cq->cq_resize_hdl->cq_bufsz;
1566 1553 } else {
1567 1554 data->mcq_maplen = cq->cq_cqinfo.qa_size;
1568 1555 data->mcq_numcqe = cq->cq_bufsz;
1569 1556 }
1570 1557
1571 1558 /*
1572 1559 * If it has passed all the above checks, then fill in all the useful
1573 1560 * mapping information (including the mapping offset that will be
1574 1561 * passed back to the devmap() interface during a subsequent mmap()
1575 1562 * call.
1576 1563 *
1577 1564 * The "offset" for CQ mmap()'s looks like this:
1578 1565 * +----------------------------------------+--------+--------------+
1579 1566 * | CQ Number | 0x33 | Reserved (0) |
1580 1567 * +----------------------------------------+--------+--------------+
1581 1568 * (64 - 8 - PAGESHIFT) bits 8 bits PAGESHIFT bits
1582 1569 *
1583 1570 * This returns information about the mapping offset, the length of
1584 1571 * the CQ memory, the CQ number (for use in later CQ doorbells), the
1585 1572 * number of CQEs the CQ memory can hold, and the size of each CQE.
1586 1573 */
1587 1574 data->mcq_rev = MLNX_UMAP_IF_VERSION;
1588 1575 data->mcq_mapoffset = ((((uint64_t)cq->cq_cqnum <<
1589 1576 MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_CQMEM_RSRC) << PAGESHIFT);
1590 1577 data->mcq_cqnum = cq->cq_cqnum;
1591 1578 data->mcq_cqesz = sizeof (hermon_hw_cqe_t);
1592 1579
1593 1580 /* doorbell record fields */
1594 1581 data->mcq_polldbr_mapoffset = cq->cq_dbr_mapoffset;
1595 1582 data->mcq_polldbr_maplen = PAGESIZE;
1596 1583 data->mcq_polldbr_offset = (uintptr_t)cq->cq_arm_ci_vdbr &
1597 1584 PAGEOFFSET;
1598 1585 data->mcq_armdbr_mapoffset = cq->cq_dbr_mapoffset;
1599 1586 data->mcq_armdbr_maplen = PAGESIZE;
1600 1587 data->mcq_armdbr_offset = data->mcq_polldbr_offset + 4;
1601 1588
1602 1589 return (DDI_SUCCESS);
1603 1590 }
1604 1591
1605 1592
1606 1593 /*
1607 1594 * hermon_umap_qp_data_out()
1608 1595 * Context: Can be called from user or kernel context.
1609 1596 */
1610 1597 static ibt_status_t
1611 1598 hermon_umap_qp_data_out(hermon_qphdl_t qp, mlnx_umap_qp_data_out_t *data,
1612 1599 size_t data_sz)
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
1613 1600 {
1614 1601 /* Check for valid QP handle pointer */
1615 1602 if (qp == NULL) {
1616 1603 return (IBT_QP_HDL_INVALID);
1617 1604 }
1618 1605
1619 1606 /* Check for valid QP mapping structure size */
1620 1607 if (data_sz < sizeof (mlnx_umap_qp_data_out_t)) {
1621 1608 return (IBT_INSUFF_RESOURCE);
1622 1609 }
1623 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1624 1610
1625 1611 /*
1626 1612 * If it has passed all the checks, then fill in all the useful
1627 1613 * mapping information (including the mapping offset that will be
1628 1614 * passed back to the devmap() interface during a subsequent mmap()
1629 1615 * call.
1630 1616 *
1631 1617 * The "offset" for QP mmap()'s looks like this:
1632 1618 * +----------------------------------------+--------+--------------+
1633 1619 * | QP Number | 0x44 | Reserved (0) |
1634 1620 * +----------------------------------------+--------+--------------+
1635 1621 * (64 - 8 - PAGESHIFT) bits 8 bits PAGESHIFT bits
1636 1622 *
1637 1623 * This returns information about the mapping offset, the length of
1638 1624 * the QP memory, and the QP number (for use in later send and recv
1639 1625 * doorbells). It also returns the following information for both
1640 1626 * the receive work queue and the send work queue, respectively: the
1641 1627 * offset (from the base mapped address) of the start of the given
1642 1628 * work queue, the 64-bit IB virtual address that corresponds to
1643 1629 * the base mapped address (needed for posting WQEs though the
1644 1630 * QP doorbells), the number of WQEs the given work queue can hold,
1645 1631 * and the size of each WQE for the given work queue.
1646 1632 */
1647 1633 data->mqp_rev = MLNX_UMAP_IF_VERSION;
1648 1634 data->mqp_mapoffset = ((((uint64_t)qp->qp_qpnum <<
1649 1635 MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_QPMEM_RSRC) << PAGESHIFT);
1650 1636 data->mqp_maplen = qp->qp_wqinfo.qa_size;
1651 1637 data->mqp_qpnum = qp->qp_qpnum;
1652 1638
1653 1639 /*
1654 1640 * If this QP is associated with a shared receive queue (SRQ),
1655 1641 * then return invalid RecvQ parameters. Otherwise, return
1656 1642 * the proper parameter values.
1657 1643 */
1658 1644 if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
1659 1645 data->mqp_rq_off = (uint32_t)qp->qp_wqinfo.qa_size;
1660 1646 data->mqp_rq_desc_addr = (uint32_t)qp->qp_wqinfo.qa_size;
1661 1647 data->mqp_rq_numwqe = 0;
1662 1648 data->mqp_rq_wqesz = 0;
1663 1649 data->mqp_rdbr_mapoffset = 0;
1664 1650 data->mqp_rdbr_maplen = 0;
1665 1651 data->mqp_rdbr_offset = 0;
1666 1652 } else {
1667 1653 data->mqp_rq_off = (uintptr_t)qp->qp_rq_buf -
1668 1654 (uintptr_t)qp->qp_wqinfo.qa_buf_aligned;
1669 1655 data->mqp_rq_desc_addr = (uint32_t)((uintptr_t)qp->qp_rq_buf -
1670 1656 qp->qp_desc_off);
1671 1657 data->mqp_rq_numwqe = qp->qp_rq_bufsz;
1672 1658 data->mqp_rq_wqesz = (1 << qp->qp_rq_log_wqesz);
1673 1659
1674 1660 /* doorbell record fields */
1675 1661 data->mqp_rdbr_mapoffset = qp->qp_rdbr_mapoffset;
1676 1662 data->mqp_rdbr_maplen = PAGESIZE;
1677 1663 data->mqp_rdbr_offset = (uintptr_t)qp->qp_rq_vdbr &
1678 1664 PAGEOFFSET;
1679 1665 }
1680 1666 data->mqp_sq_off = (uintptr_t)qp->qp_sq_buf -
1681 1667 (uintptr_t)qp->qp_wqinfo.qa_buf_aligned;
1682 1668 data->mqp_sq_desc_addr = (uint32_t)((uintptr_t)qp->qp_sq_buf -
1683 1669 qp->qp_desc_off);
1684 1670 data->mqp_sq_numwqe = qp->qp_sq_bufsz;
1685 1671 data->mqp_sq_wqesz = (1 << qp->qp_sq_log_wqesz);
1686 1672 data->mqp_sq_headroomwqes = qp->qp_sq_hdrmwqes;
1687 1673
1688 1674 /* doorbell record fields */
1689 1675 data->mqp_sdbr_mapoffset = 0;
1690 1676 data->mqp_sdbr_maplen = 0;
1691 1677 data->mqp_sdbr_offset = 0;
1692 1678
1693 1679 return (DDI_SUCCESS);
1694 1680 }
1695 1681
1696 1682
1697 1683 /*
1698 1684 * hermon_umap_srq_data_out()
1699 1685 * Context: Can be called from user or kernel context.
1700 1686 */
1701 1687 static ibt_status_t
1702 1688 hermon_umap_srq_data_out(hermon_srqhdl_t srq, mlnx_umap_srq_data_out_t *data,
1703 1689 size_t data_sz)
↓ open down ↓ |
70 lines elided |
↑ open up ↑ |
1704 1690 {
1705 1691 /* Check for valid SRQ handle pointer */
1706 1692 if (srq == NULL) {
1707 1693 return (IBT_SRQ_HDL_INVALID);
1708 1694 }
1709 1695
1710 1696 /* Check for valid SRQ mapping structure size */
1711 1697 if (data_sz < sizeof (mlnx_umap_srq_data_out_t)) {
1712 1698 return (IBT_INSUFF_RESOURCE);
1713 1699 }
1714 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1715 1700
1716 1701 /*
1717 1702 * If it has passed all the checks, then fill in all the useful
1718 1703 * mapping information (including the mapping offset that will be
1719 1704 * passed back to the devmap() interface during a subsequent mmap()
1720 1705 * call.
1721 1706 *
1722 1707 * The "offset" for SRQ mmap()'s looks like this:
1723 1708 * +----------------------------------------+--------+--------------+
1724 1709 * | SRQ Number | 0x66 | Reserved (0) |
1725 1710 * +----------------------------------------+--------+--------------+
1726 1711 * (64 - 8 - PAGESHIFT) bits 8 bits PAGESHIFT bits
1727 1712 *
1728 1713 * This returns information about the mapping offset, the length of the
1729 1714 * SRQ memory, and the SRQ number (for use in later send and recv
1730 1715 * doorbells). It also returns the following information for the
1731 1716 * shared receive queue: the offset (from the base mapped address) of
1732 1717 * the start of the given work queue, the 64-bit IB virtual address
1733 1718 * that corresponds to the base mapped address (needed for posting WQEs
1734 1719 * though the QP doorbells), the number of WQEs the given work queue
1735 1720 * can hold, and the size of each WQE for the given work queue.
1736 1721 */
1737 1722 data->msrq_rev = MLNX_UMAP_IF_VERSION;
1738 1723 data->msrq_mapoffset = ((((uint64_t)srq->srq_srqnum <<
1739 1724 MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_SRQMEM_RSRC) << PAGESHIFT);
1740 1725 data->msrq_maplen = srq->srq_wqinfo.qa_size;
1741 1726 data->msrq_srqnum = srq->srq_srqnum;
1742 1727
1743 1728 data->msrq_desc_addr = (uint32_t)((uintptr_t)srq->srq_wq_buf -
1744 1729 srq->srq_desc_off);
1745 1730 data->msrq_numwqe = srq->srq_wq_bufsz;
1746 1731 data->msrq_wqesz = (1 << srq->srq_wq_log_wqesz);
1747 1732
1748 1733 /* doorbell record fields */
1749 1734 data->msrq_rdbr_mapoffset = srq->srq_rdbr_mapoffset;
1750 1735 data->msrq_rdbr_maplen = PAGESIZE;
1751 1736 data->msrq_rdbr_offset = (uintptr_t)srq->srq_wq_vdbr &
1752 1737 PAGEOFFSET;
1753 1738
1754 1739 return (DDI_SUCCESS);
1755 1740 }
1756 1741
1757 1742
1758 1743 /*
1759 1744 * hermon_umap_pd_data_out()
1760 1745 * Context: Can be called from user or kernel context.
1761 1746 */
1762 1747 static ibt_status_t
1763 1748 hermon_umap_pd_data_out(hermon_pdhdl_t pd, mlnx_umap_pd_data_out_t *data,
1764 1749 size_t data_sz)
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
1765 1750 {
1766 1751 /* Check for valid PD handle pointer */
1767 1752 if (pd == NULL) {
1768 1753 return (IBT_PD_HDL_INVALID);
1769 1754 }
1770 1755
1771 1756 /* Check for valid PD mapping structure size */
1772 1757 if (data_sz < sizeof (mlnx_umap_pd_data_out_t)) {
1773 1758 return (IBT_INSUFF_RESOURCE);
1774 1759 }
1775 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1776 1760
1777 1761 /*
1778 1762 * If it has passed all the checks, then fill the PD table index
1779 1763 * (the PD table allocated index for the PD pd_pdnum).
1780 1764 */
1781 1765 data->mpd_rev = MLNX_UMAP_IF_VERSION;
1782 1766 data->mpd_pdnum = pd->pd_pdnum;
1783 1767
1784 1768 return (DDI_SUCCESS);
1785 1769 }
1786 1770
1787 1771
1788 1772 /*
1789 1773 * hermon_umap_db_init()
1790 1774 * Context: Only called from attach() path context
1791 1775 */
1792 1776 void
1793 1777 hermon_umap_db_init(void)
1794 1778 {
1795 1779 /*
1796 1780 * Initialize the lock used by the Hermon "userland resources database"
1797 1781 * This is used to ensure atomic access to add, remove, and find
1798 1782 * entries in the database.
1799 1783 */
1800 1784 mutex_init(&hermon_userland_rsrc_db.hdl_umapdb_lock, NULL,
1801 1785 MUTEX_DRIVER, NULL);
1802 1786
1803 1787 /*
1804 1788 * Initialize the AVL tree used for the "userland resources
1805 1789 * database". Using an AVL tree here provides the ability to
1806 1790 * scale the database size to large numbers of resources. The
1807 1791 * entries in the tree are "hermon_umap_db_entry_t" (see
1808 1792 * hermon_umap.h). The tree is searched with the help of the
1809 1793 * hermon_umap_db_compare() routine.
1810 1794 */
1811 1795 avl_create(&hermon_userland_rsrc_db.hdl_umapdb_avl,
1812 1796 hermon_umap_db_compare, sizeof (hermon_umap_db_entry_t),
1813 1797 offsetof(hermon_umap_db_entry_t, hdbe_avlnode));
1814 1798 }
1815 1799
1816 1800
1817 1801 /*
1818 1802 * hermon_umap_db_fini()
1819 1803 * Context: Only called from attach() and/or detach() path contexts
1820 1804 */
1821 1805 void
1822 1806 hermon_umap_db_fini(void)
1823 1807 {
1824 1808 /* Destroy the AVL tree for the "userland resources database" */
1825 1809 avl_destroy(&hermon_userland_rsrc_db.hdl_umapdb_avl);
1826 1810
1827 1811 /* Destroy the lock for the "userland resources database" */
1828 1812 mutex_destroy(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1829 1813 }
1830 1814
1831 1815
1832 1816 /*
1833 1817 * hermon_umap_db_alloc()
1834 1818 * Context: Can be called from user or kernel context.
1835 1819 */
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
1836 1820 hermon_umap_db_entry_t *
1837 1821 hermon_umap_db_alloc(uint_t instance, uint64_t key, uint_t type, uint64_t value)
1838 1822 {
1839 1823 hermon_umap_db_entry_t *umapdb;
1840 1824
1841 1825 /* Allocate an entry to add to the "userland resources database" */
1842 1826 umapdb = kmem_zalloc(sizeof (hermon_umap_db_entry_t), KM_NOSLEEP);
1843 1827 if (umapdb == NULL) {
1844 1828 return (NULL);
1845 1829 }
1846 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
1847 1830
1848 1831 /* Fill in the fields in the database entry */
1849 1832 umapdb->hdbe_common.hdb_instance = instance;
1850 1833 umapdb->hdbe_common.hdb_type = type;
1851 1834 umapdb->hdbe_common.hdb_key = key;
1852 1835 umapdb->hdbe_common.hdb_value = value;
1853 1836
1854 1837 return (umapdb);
1855 1838 }
1856 1839
1857 1840
1858 1841 /*
1859 1842 * hermon_umap_db_free()
1860 1843 * Context: Can be called from user or kernel context.
1861 1844 */
1862 1845 void
1863 1846 hermon_umap_db_free(hermon_umap_db_entry_t *umapdb)
1864 1847 {
1865 1848 /* Free the database entry */
1866 1849 kmem_free(umapdb, sizeof (hermon_umap_db_entry_t));
1867 1850 }
1868 1851
1869 1852
1870 1853 /*
1871 1854 * hermon_umap_db_add()
1872 1855 * Context: Can be called from user or kernel context.
1873 1856 */
1874 1857 void
1875 1858 hermon_umap_db_add(hermon_umap_db_entry_t *umapdb)
1876 1859 {
1877 1860 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1878 1861 hermon_umap_db_add_nolock(umapdb);
1879 1862 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1880 1863 }
1881 1864
1882 1865
1883 1866 /*
1884 1867 * hermon_umap_db_add_nolock()
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
1885 1868 * Context: Can be called from user or kernel context.
1886 1869 */
1887 1870 void
1888 1871 hermon_umap_db_add_nolock(hermon_umap_db_entry_t *umapdb)
1889 1872 {
1890 1873 hermon_umap_db_query_t query;
1891 1874 avl_index_t where;
1892 1875
1893 1876 ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
1894 1877
1895 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
1896 -
1897 1878 /*
1898 1879 * Copy the common portion of the "to-be-added" database entry
1899 1880 * into the "hermon_umap_db_query_t" structure. We use this structure
1900 1881 * (with no flags set) to find the appropriate location in the
1901 1882 * "userland resources database" for the new entry to be added.
1902 1883 *
1903 1884 * Note: we expect that this entry should not be found in the
1904 1885 * database (unless something bad has happened).
1905 1886 */
1906 1887 query.hqdb_common = umapdb->hdbe_common;
1907 1888 query.hqdb_flags = 0;
1908 1889 (void) avl_find(&hermon_userland_rsrc_db.hdl_umapdb_avl, &query,
1909 1890 &where);
1910 1891
1911 1892 /*
1912 1893 * Now, using the "where" field from the avl_find() operation
1913 1894 * above, we will insert the new database entry ("umapdb").
1914 1895 */
1915 1896 avl_insert(&hermon_userland_rsrc_db.hdl_umapdb_avl, umapdb,
1916 1897 where);
1917 1898 }
1918 1899
1919 1900
1920 1901 /*
1921 1902 * hermon_umap_db_find()
1922 1903 * Context: Can be called from user or kernel context.
1923 1904 */
1924 1905 int
1925 1906 hermon_umap_db_find(uint_t instance, uint64_t key, uint_t type,
1926 1907 uint64_t *value, uint_t flag, hermon_umap_db_entry_t **umapdb)
1927 1908 {
1928 1909 int status;
1929 1910
1930 1911 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1931 1912 status = hermon_umap_db_find_nolock(instance, key, type, value, flag,
1932 1913 umapdb);
1933 1914 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1934 1915
1935 1916 return (status);
1936 1917 }
1937 1918
1938 1919
1939 1920 /*
1940 1921 * hermon_umap_db_find_nolock()
1941 1922 * Context: Can be called from user or kernel context.
1942 1923 */
1943 1924 int
1944 1925 hermon_umap_db_find_nolock(uint_t instance, uint64_t key, uint_t type,
1945 1926 uint64_t *value, uint_t flags, hermon_umap_db_entry_t **umapdb)
1946 1927 {
1947 1928 hermon_umap_db_query_t query;
1948 1929 hermon_umap_db_entry_t *entry;
1949 1930 avl_index_t where;
1950 1931
1951 1932 ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
1952 1933
1953 1934 /*
1954 1935 * Fill in key, type, instance, and flags values of the
1955 1936 * hermon_umap_db_query_t in preparation for the database
1956 1937 * lookup.
1957 1938 */
1958 1939 query.hqdb_flags = flags;
1959 1940 query.hqdb_common.hdb_key = key;
1960 1941 query.hqdb_common.hdb_type = type;
1961 1942 query.hqdb_common.hdb_instance = instance;
↓ open down ↓ |
55 lines elided |
↑ open up ↑ |
1962 1943
1963 1944 /*
1964 1945 * Perform the database query. If no entry is found, then
1965 1946 * return failure, else continue.
1966 1947 */
1967 1948 entry = (hermon_umap_db_entry_t *)avl_find(
1968 1949 &hermon_userland_rsrc_db.hdl_umapdb_avl, &query, &where);
1969 1950 if (entry == NULL) {
1970 1951 return (DDI_FAILURE);
1971 1952 }
1972 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*entry))
1973 1953
1974 1954 /*
1975 1955 * If the flags argument specifies that the entry should
1976 1956 * be removed if found, then call avl_remove() to remove
1977 1957 * the entry from the database.
1978 1958 */
1979 1959 if (flags & HERMON_UMAP_DB_REMOVE) {
1980 1960
1981 1961 avl_remove(&hermon_userland_rsrc_db.hdl_umapdb_avl, entry);
1982 1962
1983 1963 /*
1984 1964 * The database entry is returned with the expectation
1985 1965 * that the caller will use hermon_umap_db_free() to
1986 1966 * free the entry's memory. ASSERT that this is non-NULL.
1987 1967 * NULL pointer should never be passed for the
1988 1968 * HERMON_UMAP_DB_REMOVE case.
1989 1969 */
1990 1970 ASSERT(umapdb != NULL);
1991 1971 }
1992 1972
1993 1973 /*
1994 1974 * If the caller would like visibility to the database entry
1995 1975 * (indicated through the use of a non-NULL "umapdb" argument),
1996 1976 * then fill it in.
1997 1977 */
1998 1978 if (umapdb != NULL) {
1999 1979 *umapdb = entry;
2000 1980 }
2001 1981
2002 1982 /* Extract value field from database entry and return success */
2003 1983 *value = entry->hdbe_common.hdb_value;
2004 1984
2005 1985 return (DDI_SUCCESS);
2006 1986 }
2007 1987
2008 1988
2009 1989 /*
2010 1990 * hermon_umap_umemlock_cb()
2011 1991 * Context: Can be called from callback context.
2012 1992 */
2013 1993 void
2014 1994 hermon_umap_umemlock_cb(ddi_umem_cookie_t *umem_cookie)
2015 1995 {
2016 1996 hermon_umap_db_entry_t *umapdb;
2017 1997 hermon_state_t *state;
2018 1998 hermon_rsrc_t *rsrcp;
2019 1999 hermon_mrhdl_t mr;
2020 2000 uint64_t value;
2021 2001 uint_t instance;
2022 2002 int status;
2023 2003 void (*mr_callback)(void *, void *);
2024 2004 void *mr_cbarg1, *mr_cbarg2;
2025 2005
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
2026 2006 /*
2027 2007 * If this was userland memory, then we need to remove its entry
2028 2008 * from the "userland resources database". Note: We use the
2029 2009 * HERMON_UMAP_DB_IGNORE_INSTANCE flag here because we don't know
2030 2010 * which instance was used when the entry was added (but we want
2031 2011 * to know after the entry is found using the other search criteria).
2032 2012 */
2033 2013 status = hermon_umap_db_find(0, (uint64_t)(uintptr_t)umem_cookie,
2034 2014 MLNX_UMAP_MRMEM_RSRC, &value, (HERMON_UMAP_DB_REMOVE |
2035 2015 HERMON_UMAP_DB_IGNORE_INSTANCE), &umapdb);
2036 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
2037 2016 if (status == DDI_SUCCESS) {
2038 2017 instance = umapdb->hdbe_common.hdb_instance;
2039 2018 state = ddi_get_soft_state(hermon_statep, instance);
2040 2019 if (state == NULL) {
2041 2020 cmn_err(CE_WARN, "Unable to match Hermon instance\n");
2042 2021 return;
2043 2022 }
2044 2023
2045 2024 /* Free the database entry */
2046 2025 hermon_umap_db_free(umapdb);
2047 2026
2048 2027 /* Use "value" to convert to an MR handle */
2049 2028 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
2050 2029 mr = (hermon_mrhdl_t)rsrcp->hr_addr;
2051 2030
2052 2031 /*
2053 2032 * If a callback has been provided, call it first. This
2054 2033 * callback is expected to do any cleanup necessary to
2055 2034 * guarantee that the subsequent MR deregister (below)
2056 2035 * will succeed. Specifically, this means freeing up memory
2057 2036 * windows which might have been associated with the MR.
2058 2037 */
2059 2038 mutex_enter(&mr->mr_lock);
2060 2039 mr_callback = mr->mr_umem_cbfunc;
2061 2040 mr_cbarg1 = mr->mr_umem_cbarg1;
2062 2041 mr_cbarg2 = mr->mr_umem_cbarg2;
2063 2042 mutex_exit(&mr->mr_lock);
2064 2043 if (mr_callback != NULL) {
2065 2044 mr_callback(mr_cbarg1, mr_cbarg2);
2066 2045 }
2067 2046
2068 2047 /*
2069 2048 * Then call hermon_mr_deregister() to release the resources
2070 2049 * associated with the MR handle. Note: Because this routine
2071 2050 * will also check for whether the ddi_umem_cookie_t is in the
2072 2051 * database, it will take responsibility for disabling the
2073 2052 * memory region and calling ddi_umem_unlock().
2074 2053 */
2075 2054 status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
2076 2055 HERMON_SLEEP);
2077 2056 if (status != DDI_SUCCESS) {
2078 2057 HERMON_WARNING(state, "Unexpected failure in "
2079 2058 "deregister from callback\n");
2080 2059 }
2081 2060 }
2082 2061 }
2083 2062
2084 2063
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
2085 2064 /*
2086 2065 * hermon_umap_db_compare()
2087 2066 * Context: Can be called from user or kernel context.
2088 2067 */
2089 2068 static int
2090 2069 hermon_umap_db_compare(const void *q, const void *e)
2091 2070 {
2092 2071 hermon_umap_db_common_t *entry_common, *query_common;
2093 2072 uint_t query_flags;
2094 2073
2095 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*((hermon_umap_db_query_t *)q)))
2096 -
2097 2074 entry_common = &((hermon_umap_db_entry_t *)e)->hdbe_common;
2098 2075 query_common = &((hermon_umap_db_query_t *)q)->hqdb_common;
2099 2076 query_flags = ((hermon_umap_db_query_t *)q)->hqdb_flags;
2100 2077
2101 2078 /*
2102 2079 * The first comparison is done on the "key" value in "query"
2103 2080 * and "entry". If they are not equal, then the appropriate
2104 2081 * search direction is returned. Else, we continue by
2105 2082 * comparing "type".
2106 2083 */
2107 2084 if (query_common->hdb_key < entry_common->hdb_key) {
2108 2085 return (-1);
2109 2086 } else if (query_common->hdb_key > entry_common->hdb_key) {
2110 2087 return (+1);
2111 2088 }
2112 2089
2113 2090 /*
2114 2091 * If the search reaches this point, then "query" and "entry"
2115 2092 * have equal key values. So we continue be comparing their
2116 2093 * "type" values. Again, if they are not equal, then the
2117 2094 * appropriate search direction is returned. Else, we continue
2118 2095 * by comparing "instance".
2119 2096 */
2120 2097 if (query_common->hdb_type < entry_common->hdb_type) {
2121 2098 return (-1);
2122 2099 } else if (query_common->hdb_type > entry_common->hdb_type) {
2123 2100 return (+1);
2124 2101 }
2125 2102
2126 2103 /*
2127 2104 * If the search reaches this point, then "query" and "entry"
2128 2105 * have exactly the same key and type values. Now we consult
2129 2106 * the "flags" field in the query to determine whether the
2130 2107 * "instance" is relevant to the search. If the
2131 2108 * HERMON_UMAP_DB_IGNORE_INSTANCE flags is set, then return
2132 2109 * success (0) here. Otherwise, continue the search by comparing
2133 2110 * instance values and returning the appropriate search direction.
2134 2111 */
2135 2112 if (query_flags & HERMON_UMAP_DB_IGNORE_INSTANCE) {
2136 2113 return (0);
2137 2114 }
2138 2115
2139 2116 /*
2140 2117 * If the search has reached this point, then "query" and "entry"
2141 2118 * can only be differentiated by their instance values. If these
2142 2119 * are not equal, then return the appropriate search direction.
2143 2120 * Else, we return success (0).
2144 2121 */
2145 2122 if (query_common->hdb_instance < entry_common->hdb_instance) {
2146 2123 return (-1);
2147 2124 } else if (query_common->hdb_instance > entry_common->hdb_instance) {
2148 2125 return (+1);
2149 2126 }
2150 2127
2151 2128 /* Everything matches... so return success */
2152 2129 return (0);
2153 2130 }
2154 2131
2155 2132
2156 2133 /*
2157 2134 * hermon_umap_db_set_onclose_cb()
2158 2135 * Context: Can be called from user or kernel context.
2159 2136 */
2160 2137 int
2161 2138 hermon_umap_db_set_onclose_cb(dev_t dev, uint64_t flag,
2162 2139 int (*callback)(void *), void *arg)
2163 2140 {
2164 2141 hermon_umap_db_priv_t *priv;
2165 2142 hermon_umap_db_entry_t *umapdb;
2166 2143 minor_t instance;
2167 2144 uint64_t value;
2168 2145 int status;
2169 2146
2170 2147 instance = HERMON_DEV_INSTANCE(dev);
2171 2148 if (instance == (minor_t)-1) {
2172 2149 return (DDI_FAILURE);
2173 2150 }
2174 2151
2175 2152 if (flag != HERMON_ONCLOSE_FLASH_INPROGRESS) {
2176 2153 return (DDI_FAILURE);
2177 2154 }
2178 2155
2179 2156 /*
2180 2157 * Grab the lock for the "userland resources database" and find
2181 2158 * the entry corresponding to this minor number. Once it's found,
2182 2159 * allocate (if necessary) and add an entry (in the "hdb_priv"
2183 2160 * field) to indicate that further processing may be needed during
2184 2161 * Hermon's close() handling.
2185 2162 */
2186 2163 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2187 2164 status = hermon_umap_db_find_nolock(instance, dev,
2188 2165 MLNX_UMAP_PID_RSRC, &value, 0, &umapdb);
2189 2166 if (status != DDI_SUCCESS) {
2190 2167 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2191 2168 return (DDI_FAILURE);
2192 2169 }
2193 2170
2194 2171 priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
2195 2172 if (priv == NULL) {
2196 2173 priv = (hermon_umap_db_priv_t *)kmem_zalloc(
2197 2174 sizeof (hermon_umap_db_priv_t), KM_NOSLEEP);
2198 2175 if (priv == NULL) {
2199 2176 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2200 2177 return (DDI_FAILURE);
2201 2178 }
2202 2179 }
2203 2180
2204 2181 /*
2205 2182 * Save away the callback and argument to be used during Hermon's
2206 2183 * close() processing.
2207 2184 */
2208 2185 priv->hdp_cb = callback;
2209 2186 priv->hdp_arg = arg;
2210 2187
2211 2188 umapdb->hdbe_common.hdb_priv = (void *)priv;
2212 2189 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2213 2190
2214 2191 return (DDI_SUCCESS);
2215 2192 }
2216 2193
2217 2194
2218 2195 /*
2219 2196 * hermon_umap_db_clear_onclose_cb()
2220 2197 * Context: Can be called from user or kernel context.
2221 2198 */
2222 2199 int
2223 2200 hermon_umap_db_clear_onclose_cb(dev_t dev, uint64_t flag)
2224 2201 {
2225 2202 hermon_umap_db_priv_t *priv;
2226 2203 hermon_umap_db_entry_t *umapdb;
2227 2204 minor_t instance;
2228 2205 uint64_t value;
2229 2206 int status;
2230 2207
2231 2208 instance = HERMON_DEV_INSTANCE(dev);
2232 2209 if (instance == (minor_t)-1) {
2233 2210 return (DDI_FAILURE);
2234 2211 }
2235 2212
2236 2213 if (flag != HERMON_ONCLOSE_FLASH_INPROGRESS) {
2237 2214 return (DDI_FAILURE);
2238 2215 }
2239 2216
2240 2217 /*
2241 2218 * Grab the lock for the "userland resources database" and find
2242 2219 * the entry corresponding to this minor number. Once it's found,
2243 2220 * remove the entry (in the "hdb_priv" field) that indicated the
2244 2221 * need for further processing during Hermon's close(). Free the
2245 2222 * entry, if appropriate.
2246 2223 */
2247 2224 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2248 2225 status = hermon_umap_db_find_nolock(instance, dev,
2249 2226 MLNX_UMAP_PID_RSRC, &value, 0, &umapdb);
2250 2227 if (status != DDI_SUCCESS) {
2251 2228 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2252 2229 return (DDI_FAILURE);
2253 2230 }
2254 2231
2255 2232 priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
2256 2233 if (priv != NULL) {
2257 2234 kmem_free(priv, sizeof (hermon_umap_db_priv_t));
2258 2235 priv = NULL;
2259 2236 }
2260 2237
2261 2238 umapdb->hdbe_common.hdb_priv = (void *)priv;
2262 2239 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2263 2240 return (DDI_SUCCESS);
2264 2241 }
2265 2242
2266 2243
2267 2244 /*
2268 2245 * hermon_umap_db_clear_onclose_cb()
2269 2246 * Context: Can be called from user or kernel context.
2270 2247 */
2271 2248 int
2272 2249 hermon_umap_db_handle_onclose_cb(hermon_umap_db_priv_t *priv)
2273 2250 {
2274 2251 int (*callback)(void *);
2275 2252
2276 2253 ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
2277 2254
2278 2255 /*
2279 2256 * Call the callback.
2280 2257 * Note: Currently there is only one callback (in "hdp_cb"), but
2281 2258 * in the future there may be more, depending on what other types
2282 2259 * of interaction there are between userland processes and the
2283 2260 * driver.
2284 2261 */
2285 2262 callback = priv->hdp_cb;
2286 2263 return (callback(priv->hdp_arg));
2287 2264 }
↓ open down ↓ |
181 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX