7127 remove -Wno-missing-braces from Makefile.uts
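The change in this file adds inner braces to the static struct modlinkage initializer so iommulib.c compiles cleanly once -Wno-missing-braces is dropped from Makefile.uts: the second member of struct modlinkage is an array, and gcc's -Wmissing-braces warns when an aggregate member is initialized without its own brace pair. A minimal stand-alone sketch of the pattern (the *_sketch types and the MODREV_1 define below are illustrative stand-ins, not the verbatim <sys/modctl.h> definitions):

    /*
     * Illustrative sketch only; the real definitions live in <sys/modctl.h>.
     * The point is that ml_linkage is an array member, so a fully braced
     * initializer for the enclosing struct needs an inner brace pair.
     */
    #define MODREV_1 1                      /* stand-in for the real constant */

    struct modlmisc_sketch {
            const char *misc_linkinfo;      /* module description string */
    };

    struct modlinkage_sketch {
            int   ml_rev;                   /* loadable-modules revision */
            void *ml_linkage[7];            /* NULL-terminated linkage pointers */
    };

    static struct modlmisc_sketch modlmisc = { "IOMMU library module" };

    /*
     * Writing "MODREV_1, (void *)&modlmisc, NULL" here would initialize the
     * array without its own braces and trigger -Wmissing-braces; the inner
     * brace pair keeps the initializer warning-clean, mirroring the diff below.
     */
    static struct modlinkage_sketch modlinkage = {
            MODREV_1, { (void *)&modlmisc, NULL }
    };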
--- old/usr/src/uts/intel/io/iommulib.c
+++ new/usr/src/uts/intel/io/iommulib.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
24 24 */
25 25
26 26 #pragma ident "@(#)iommulib.c 1.6 08/09/07 SMI"
27 27
28 28 #include <sys/sunddi.h>
29 29 #include <sys/sunndi.h>
30 30 #include <sys/errno.h>
31 31 #include <sys/modctl.h>
32 32 #include <sys/iommulib.h>
33 33
34 34 /* ******** Type definitions private to this file ********************** */
35 35
36 36 /* 1 per IOMMU unit. There may be more than one per dip */
37 37 typedef struct iommulib_unit {
38 38 kmutex_t ilu_lock;
39 39 uint64_t ilu_ref;
40 40 uint32_t ilu_unitid;
41 41 dev_info_t *ilu_dip;
42 42 iommulib_ops_t *ilu_ops;
43 43 void* ilu_data;
44 44 struct iommulib_unit *ilu_next;
45 45 struct iommulib_unit *ilu_prev;
46 46 iommulib_nexhandle_t ilu_nex;
47 47 } iommulib_unit_t;
48 48
49 49 typedef struct iommulib_nex {
50 50 dev_info_t *nex_dip;
51 51 iommulib_nexops_t nex_ops;
52 52 struct iommulib_nex *nex_next;
53 53 struct iommulib_nex *nex_prev;
54 54 uint_t nex_ref;
55 55 } iommulib_nex_t;
56 56
57 57 /* ********* Globals ************************ */
58 58
59 59 /* For IOMMU drivers */
60 60 smbios_hdl_t *iommulib_smbios;
61 61
62 62 /* IOMMU side: Following data protected by lock */
63 63 static kmutex_t iommulib_lock;
64 64 static iommulib_unit_t *iommulib_list;
65 65 static uint64_t iommulib_unit_ids = 0;
66 66 static uint64_t iommulib_num_units = 0;
67 67
68 68 /* rootnex side data */
69 69
70 70 static kmutex_t iommulib_nexus_lock;
71 71 static iommulib_nex_t *iommulib_nexus_list;
72 72
73 73 /* can be set atomically without lock */
74 74 static volatile uint32_t iommulib_fini;
75 75
76 76 /* debug flag */
77 77 static int iommulib_debug;
78 78
79 79 /*
80 80 * Module linkage information for the kernel.
81 81 */
82 82 static struct modlmisc modlmisc = {
83 83 &mod_miscops, "IOMMU library module"
84 84 };
85 85
86 86 static struct modlinkage modlinkage = {
87 - MODREV_1, (void *)&modlmisc, NULL
87 + MODREV_1, { (void *)&modlmisc, NULL }
88 88 };
89 89
90 90 int
91 91 _init(void)
92 92 {
93 93 return (mod_install(&modlinkage));
94 94 }
95 95
96 96 int
97 97 _fini(void)
98 98 {
99 99 mutex_enter(&iommulib_lock);
100 100 if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
101 101 mutex_exit(&iommulib_lock);
102 102 return (EBUSY);
103 103 }
104 104 iommulib_fini = 1;
105 105
106 106 mutex_exit(&iommulib_lock);
107 107 return (mod_remove(&modlinkage));
108 108 }
109 109
110 110 int
111 111 _info(struct modinfo *modinfop)
112 112 {
113 113 return (mod_info(&modlinkage, modinfop));
114 114 }
115 115
116 116 /*
117 117 * Routines with iommulib_iommu_* are invoked from the
118 118 * IOMMU driver.
119 119 * Routines with iommulib_nex* are invoked from the
120 120 * nexus driver (typically rootnex)
121 121 */
122 122
123 123 int
124 124 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
125 125 iommulib_nexhandle_t *handle)
126 126 {
127 127 iommulib_nex_t *nexp;
128 128 int instance = ddi_get_instance(dip);
129 129 const char *driver = ddi_driver_name(dip);
130 130 dev_info_t *pdip = ddi_get_parent(dip);
131 131 const char *f = "iommulib_nexus_register";
132 132
133 133 ASSERT(nexops);
134 134 ASSERT(handle);
135 135
136 136 *handle = NULL;
137 137
138 138 /*
139 139 * Root node is never busy held
140 140 */
141 141 if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
142 142 !DEVI_BUSY_OWNED(pdip))) {
143 143 cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
144 144 "or busy held for nexops vector (%p). Failing registration",
145 145 f, (void *)nexops);
146 146 return (DDI_FAILURE);
147 147 }
148 148
149 149 if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
150 150 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
151 151 "in nexops vector (%p). Failing NEXUS registration",
152 152 f, driver, instance, (void *)nexops);
153 153 return (DDI_FAILURE);
154 154 }
155 155
156 156 ASSERT(nexops->nops_data == NULL);
157 157
158 158 if (nexops->nops_id == NULL) {
159 159 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
160 160 "Failing registration for nexops vector: %p",
161 161 f, driver, instance, (void *)nexops);
162 162 return (DDI_FAILURE);
163 163 }
164 164
165 165 if (nexops->nops_dma_allochdl == NULL) {
166 166 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
167 167 "Failing registration for ops vector: %p", f,
168 168 driver, instance, (void *)nexops);
169 169 return (DDI_FAILURE);
170 170 }
171 171
172 172 if (nexops->nops_dma_freehdl == NULL) {
173 173 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
174 174 "Failing registration for ops vector: %p", f,
175 175 driver, instance, (void *)nexops);
176 176 return (DDI_FAILURE);
177 177 }
178 178
179 179 if (nexops->nops_dma_bindhdl == NULL) {
180 180 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
181 181 "Failing registration for ops vector: %p", f,
182 182 driver, instance, (void *)nexops);
183 183 return (DDI_FAILURE);
184 184 }
185 185
186 186 if (nexops->nops_dma_sync == NULL) {
187 187 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
188 188 "Failing registration for ops vector: %p", f,
189 189 driver, instance, (void *)nexops);
190 190 return (DDI_FAILURE);
191 191 }
192 192
193 193 if (nexops->nops_dma_reset_cookies == NULL) {
194 194 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
195 195 "Failing registration for ops vector: %p", f,
196 196 driver, instance, (void *)nexops);
197 197 return (DDI_FAILURE);
198 198 }
199 199
200 200 if (nexops->nops_dma_get_cookies == NULL) {
201 201 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
202 202 "Failing registration for ops vector: %p", f,
203 203 driver, instance, (void *)nexops);
204 204 return (DDI_FAILURE);
205 205 }
206 206
207 207 if (nexops->nops_dma_set_cookies == NULL) {
208 208 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
209 209 "Failing registration for ops vector: %p", f,
210 210 driver, instance, (void *)nexops);
211 211 return (DDI_FAILURE);
212 212 }
213 213
214 214 if (nexops->nops_dma_clear_cookies == NULL) {
215 215 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
216 216 "Failing registration for ops vector: %p", f,
217 217 driver, instance, (void *)nexops);
218 218 return (DDI_FAILURE);
219 219 }
220 220
221 221 if (nexops->nops_dma_get_sleep_flags == NULL) {
222 222 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
223 223 "Failing registration for ops vector: %p", f,
224 224 driver, instance, (void *)nexops);
225 225 return (DDI_FAILURE);
226 226 }
227 227
228 228 if (nexops->nops_dma_win == NULL) {
229 229 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
230 230 "Failing registration for ops vector: %p", f,
231 231 driver, instance, (void *)nexops);
232 232 return (DDI_FAILURE);
233 233 }
234 234
235 235 if (nexops->nops_dmahdl_setprivate == NULL) {
236 236 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_setprivate op. "
237 237 "Failing registration for ops vector: %p", f,
238 238 driver, instance, (void *)nexops);
239 239 return (DDI_FAILURE);
240 240 }
241 241
242 242 if (nexops->nops_dmahdl_getprivate == NULL) {
243 243 cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_getprivate op. "
244 244 "Failing registration for ops vector: %p", f,
245 245 driver, instance, (void *)nexops);
246 246 return (DDI_FAILURE);
247 247 }
248 248
249 249 nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
250 250
251 251 mutex_enter(&iommulib_lock);
252 252 if (iommulib_fini == 1) {
253 253 mutex_exit(&iommulib_lock);
254 254 cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
255 255 "Failing NEXUS register.", f);
256 256 kmem_free(nexp, sizeof (iommulib_nex_t));
257 257 return (DDI_FAILURE);
258 258 }
259 259
260 260 /*
261 261 * fini/register race conditions have been handled. Now create the
262 262 * nexus struct
263 263 */
264 264 ndi_hold_devi(dip);
265 265 nexp->nex_dip = dip;
266 266 nexp->nex_ops = *nexops;
267 267
268 268 mutex_enter(&iommulib_nexus_lock);
269 269 nexp->nex_next = iommulib_nexus_list;
270 270 iommulib_nexus_list = nexp;
271 271 nexp->nex_prev = NULL;
272 272
273 273 if (nexp->nex_next != NULL)
274 274 nexp->nex_next->nex_prev = nexp;
275 275
276 276 nexp->nex_ref = 0;
277 277
278 278 /*
279 279 * The nexus device won't be controlled by an IOMMU.
280 280 */
281 281 DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
282 282
283 283 DEVI(dip)->devi_iommulib_nex_handle = nexp;
284 284
285 285 mutex_exit(&iommulib_nexus_lock);
286 286 mutex_exit(&iommulib_lock);
287 287
288 288 cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered NEXUS %s "
289 289 "nexops=%p", f, driver, instance, ddi_node_name(dip),
290 290 (void *)nexops);
291 291
292 292 *handle = nexp;
293 293
294 294 return (DDI_SUCCESS);
295 295 }
296 296
297 297 int
298 298 iommulib_nexus_unregister(iommulib_nexhandle_t handle)
299 299 {
300 300 dev_info_t *dip;
301 301 int instance;
302 302 const char *driver;
303 303 iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
304 304 const char *f = "iommulib_nexus_unregister";
305 305
306 306 ASSERT(nexp);
307 307
308 308 if (nexp->nex_ref != 0)
309 309 return (DDI_FAILURE);
310 310
311 311 mutex_enter(&iommulib_nexus_lock);
312 312
313 313 dip = nexp->nex_dip;
314 314 driver = ddi_driver_name(dip);
315 315 instance = ddi_get_instance(dip);
316 316
317 317 /* A future enhancement would be to add ref-counts */
318 318
319 319 if (nexp->nex_prev == NULL) {
320 320 iommulib_nexus_list = nexp->nex_next;
321 321 } else {
322 322 nexp->nex_prev->nex_next = nexp->nex_next;
323 323 }
324 324
325 325 if (nexp->nex_next != NULL)
326 326 nexp->nex_next->nex_prev = nexp->nex_prev;
327 327
328 328 mutex_exit(&iommulib_nexus_lock);
329 329
330 330 kmem_free(nexp, sizeof (iommulib_nex_t));
331 331
332 332 cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
333 333 "unregistered from IOMMULIB", f, driver, instance,
334 334 ddi_node_name(dip));
335 335
336 336 ndi_rele_devi(dip);
337 337
338 338 return (DDI_SUCCESS);
339 339 }
340 340
341 341 int
342 342 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
343 343 iommulib_handle_t *handle)
344 344 {
345 345 const char *vendor;
346 346 iommulib_unit_t *unitp;
347 347 int instance = ddi_get_instance(dip);
348 348 const char *driver = ddi_driver_name(dip);
349 349 const char *f = "iommulib_register";
350 350
351 351 ASSERT(ops);
352 352 ASSERT(handle);
353 353
354 354 if (ops->ilops_vers != IOMMU_OPS_VERSION) {
355 355 cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
356 356 "in ops vector (%p). Failing registration", f, driver,
357 357 instance, (void *)ops);
358 358 return (DDI_FAILURE);
359 359 }
360 360
361 361 switch (ops->ilops_vendor) {
362 362 case AMD_IOMMU:
363 363 vendor = "AMD";
364 364 break;
365 365 case INTEL_IOMMU:
366 366 vendor = "Intel";
367 367 break;
368 368 case INVALID_VENDOR:
369 369 cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
370 370 "Failing registration for ops vector: %p", f,
371 371 driver, instance, ops->ilops_vendor, (void *)ops);
372 372 return (DDI_FAILURE);
373 373 default:
374 374 cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
375 375 "Failing registration for ops vector: %p", f,
376 376 driver, instance, ops->ilops_vendor, (void *)ops);
377 377 return (DDI_FAILURE);
378 378 }
379 379
380 380 cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
381 381 " %s", f, driver, instance, vendor);
382 382
383 383 if (ops->ilops_data == NULL) {
384 384 cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
385 385 "Failing registration for ops vector: %p", f,
386 386 driver, instance, (void *)ops);
387 387 return (DDI_FAILURE);
388 388 }
389 389
390 390 if (ops->ilops_id == NULL) {
391 391 cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
392 392 "Failing registration for ops vector: %p", f,
393 393 driver, instance, (void *)ops);
394 394 return (DDI_FAILURE);
395 395 }
396 396
397 397 if (ops->ilops_probe == NULL) {
398 398 cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
399 399 "Failing registration for ops vector: %p", f,
400 400 driver, instance, (void *)ops);
401 401 return (DDI_FAILURE);
402 402 }
403 403
404 404 if (ops->ilops_dma_allochdl == NULL) {
405 405 cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
406 406 "Failing registration for ops vector: %p", f,
407 407 driver, instance, (void *)ops);
408 408 return (DDI_FAILURE);
409 409 }
410 410
411 411 if (ops->ilops_dma_freehdl == NULL) {
412 412 cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
413 413 "Failing registration for ops vector: %p", f,
414 414 driver, instance, (void *)ops);
415 415 return (DDI_FAILURE);
416 416 }
417 417
418 418 if (ops->ilops_dma_bindhdl == NULL) {
419 419 cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
420 420 "Failing registration for ops vector: %p", f,
421 421 driver, instance, (void *)ops);
422 422 return (DDI_FAILURE);
423 423 }
424 424
425 425 if (ops->ilops_dma_sync == NULL) {
426 426 cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
427 427 "Failing registration for ops vector: %p", f,
428 428 driver, instance, (void *)ops);
429 429 return (DDI_FAILURE);
430 430 }
431 431
432 432 if (ops->ilops_dma_win == NULL) {
433 433 cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
434 434 "Failing registration for ops vector: %p", f,
435 435 driver, instance, (void *)ops);
436 436 return (DDI_FAILURE);
437 437 }
438 438
439 439 unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
440 440 mutex_enter(&iommulib_lock);
441 441 if (iommulib_fini == 1) {
442 442 mutex_exit(&iommulib_lock);
443 443 cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
444 444 f);
445 445 kmem_free(unitp, sizeof (iommulib_unit_t));
446 446 return (DDI_FAILURE);
447 447 }
448 448
449 449 /*
450 450 * fini/register race conditions have been handled. Now create the
451 451 * IOMMU unit
452 452 */
453 453 mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
454 454
455 455 mutex_enter(&unitp->ilu_lock);
456 456 unitp->ilu_unitid = ++iommulib_unit_ids;
457 457 unitp->ilu_ref = 0;
458 458 ndi_hold_devi(dip);
459 459 unitp->ilu_dip = dip;
460 460 unitp->ilu_ops = ops;
461 461 unitp->ilu_data = ops->ilops_data;
462 462
463 463 unitp->ilu_next = iommulib_list;
464 464 iommulib_list = unitp;
465 465 unitp->ilu_prev = NULL;
466 466 if (unitp->ilu_next)
467 467 unitp->ilu_next->ilu_prev = unitp;
468 468
469 469 /*
470 470 * The IOMMU device itself is not controlled by an IOMMU.
471 471 */
472 472 DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
473 473
474 474 mutex_exit(&unitp->ilu_lock);
475 475
476 476 iommulib_num_units++;
477 477
478 478 *handle = unitp;
479 479
480 480 mutex_exit(&iommulib_lock);
481 481
482 482 cmn_err(CE_NOTE, "!%s: %s%d: Succesfully registered IOMMU unit "
483 483 "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
484 484 f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
485 485 unitp->ilu_unitid);
486 486
487 487 return (DDI_SUCCESS);
488 488 }
489 489
490 490 int
491 491 iommulib_iommu_unregister(iommulib_handle_t handle)
492 492 {
493 493 uint32_t unitid;
494 494 dev_info_t *dip;
495 495 int instance;
496 496 const char *driver;
497 497 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
498 498 const char *f = "iommulib_unregister";
499 499
500 500 ASSERT(unitp);
501 501
502 502 mutex_enter(&iommulib_lock);
503 503 mutex_enter(&unitp->ilu_lock);
504 504
505 505 unitid = unitp->ilu_unitid;
506 506 dip = unitp->ilu_dip;
507 507 driver = ddi_driver_name(dip);
508 508 instance = ddi_get_instance(dip);
509 509
510 510 if (unitp->ilu_ref != 0) {
511 511 mutex_exit(&unitp->ilu_lock);
512 512 mutex_exit(&iommulib_lock);
513 513 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
514 514 "unregister IOMMULIB unitid %u",
515 515 f, driver, instance, unitid);
516 516 return (DDI_FAILURE);
517 517 }
518 518 unitp->ilu_unitid = 0;
519 519 ASSERT(unitp->ilu_ref == 0);
520 520
521 521 if (unitp->ilu_prev == NULL) {
522 522 iommulib_list = unitp->ilu_next;
523 523 unitp->ilu_next->ilu_prev = NULL;
524 524 } else {
525 525 unitp->ilu_prev->ilu_next = unitp->ilu_next;
526 526 unitp->ilu_next->ilu_prev = unitp->ilu_prev;
527 527 }
528 528
529 529 iommulib_num_units--;
530 530
531 531 mutex_exit(&unitp->ilu_lock);
532 532
533 533 mutex_destroy(&unitp->ilu_lock);
534 534 kmem_free(unitp, sizeof (iommulib_unit_t));
535 535
536 536 mutex_exit(&iommulib_lock);
537 537
538 538 cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
539 539 "unregistered", f, driver, instance, unitid);
540 540
541 541 ndi_rele_devi(dip);
542 542
543 543 return (DDI_SUCCESS);
544 544 }
545 545
546 546 int
547 547 iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
548 548 {
549 549 iommulib_unit_t *unitp;
550 550 int instance = ddi_get_instance(rdip);
551 551 const char *driver = ddi_driver_name(rdip);
552 552 const char *f = "iommulib_nex_open";
553 553
554 554 ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
555 555 ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);
556 556
557 557 /* prevent use of IOMMU for AMD IOMMU's DMA */
558 558 if (strcmp(driver, "amd_iommu") == 0) {
559 559 DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
560 560 return (DDI_ENOTSUP);
561 561 }
562 562
563 563 /*
564 564 * Use the probe entry point to determine in a hardware specific
565 565 * manner whether this dip is controlled by an IOMMU. If yes,
566 566 * return the handle corresponding to the IOMMU unit.
567 567 */
568 568
569 569 mutex_enter(&iommulib_lock);
570 570 for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
571 571 if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
572 572 break;
573 573 }
574 574
575 575 if (unitp == NULL) {
576 576 mutex_exit(&iommulib_lock);
577 577 if (iommulib_debug) {
578 578 char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
579 579 cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
580 580 "controlled by an IOMMU: path=%s", f, driver,
581 581 instance, (void *)rdip, ddi_pathname(rdip, buf));
582 582 kmem_free(buf, MAXPATHLEN);
583 583 }
584 584 DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
585 585 return (DDI_ENOTSUP);
586 586 }
587 587
588 588 mutex_enter(&unitp->ilu_lock);
589 589 unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
590 590 unitp->ilu_ref++;
591 591 DEVI(rdip)->devi_iommulib_handle = unitp;
592 592 mutex_exit(&unitp->ilu_lock);
593 593 mutex_exit(&iommulib_lock);
594 594
595 595 atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);
596 596
597 597 return (DDI_SUCCESS);
598 598 }
599 599
600 600 void
601 601 iommulib_nex_close(dev_info_t *rdip)
602 602 {
603 603 iommulib_unit_t *unitp;
604 604 const char *driver;
605 605 int instance;
606 606 uint32_t unitid;
607 607 iommulib_nex_t *nexp;
608 608 const char *f = "iommulib_nex_close";
609 609
610 610 ASSERT(IOMMU_USED(rdip));
611 611
612 612 unitp = DEVI(rdip)->devi_iommulib_handle;
613 613
614 614 mutex_enter(&iommulib_lock);
615 615 mutex_enter(&unitp->ilu_lock);
616 616
617 617 nexp = (iommulib_nex_t *)unitp->ilu_nex;
618 618 DEVI(rdip)->devi_iommulib_handle = NULL;
619 619
620 620 unitid = unitp->ilu_unitid;
621 621 driver = ddi_driver_name(unitp->ilu_dip);
622 622 instance = ddi_get_instance(unitp->ilu_dip);
623 623
624 624 unitp->ilu_ref--;
625 625 mutex_exit(&unitp->ilu_lock);
626 626 mutex_exit(&iommulib_lock);
627 627
628 628 atomic_dec_uint(&nexp->nex_ref);
629 629
630 630 if (iommulib_debug) {
631 631 char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
632 632 (void) ddi_pathname(rdip, buf);
633 633 cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
634 634 "unitid=%u rdip path = %s", f, driver, instance,
635 635 (void *)rdip, unitid, buf);
636 636 kmem_free(buf, MAXPATHLEN);
637 637 }
638 638 }
639 639
640 640 int
641 641 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
642 642 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
643 643 caddr_t arg, ddi_dma_handle_t *dma_handlep)
644 644 {
645 645 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
646 646 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
647 647
648 648 ASSERT(unitp);
649 649
650 650 /* No need to grab lock - the handle is reference counted */
651 651 return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
652 652 attr, waitfp, arg, dma_handlep));
653 653 }
654 654
655 655 int
656 656 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
657 657 ddi_dma_handle_t dma_handle)
658 658 {
659 659 int error;
660 660 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
661 661 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
662 662
663 663 ASSERT(unitp);
664 664
665 665 /* No need to grab lock - the handle is reference counted */
666 666 error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
667 667 rdip, dma_handle);
668 668
669 669 return (error);
670 670 }
671 671
672 672 int
673 673 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
674 674 ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
675 675 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
676 676 {
677 677 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
678 678 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
679 679
680 680 ASSERT(unitp);
681 681
682 682 /* No need to grab lock - the handle is reference counted */
683 683 return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
684 684 dmareq, cookiep, ccountp));
685 685 }
686 686
687 687 int
688 688 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
689 689 ddi_dma_handle_t dma_handle)
690 690 {
691 691 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
692 692 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
693 693
694 694 ASSERT(unitp);
695 695
696 696 /* No need to grab lock - the handle is reference counted */
697 697 return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
698 698 dma_handle));
699 699 }
700 700
701 701 int
702 702 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
703 703 ddi_dma_handle_t dma_handle, off_t off, size_t len,
704 704 uint_t cache_flags)
705 705 {
706 706 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
707 707 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
708 708
709 709 ASSERT(unitp);
710 710
711 711 /* No need to grab lock - the handle is reference counted */
712 712 return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
713 713 off, len, cache_flags));
714 714 }
715 715
716 716 int
717 717 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
718 718 ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
719 719 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
720 720 {
721 721 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
722 722 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
723 723
724 724 ASSERT(unitp);
725 725
726 726 /* No need to grab lock - the handle is reference counted */
727 727 return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
728 728 win, offp, lenp, cookiep, ccountp));
729 729 }
730 730
731 731 int
732 732 iommulib_nexdma_mapobject(dev_info_t *dip, dev_info_t *rdip,
733 733 ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
734 734 ddi_dma_obj_t *dmao)
735 735 {
736 736 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
737 737 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
738 738
739 739 return (unitp->ilu_ops->ilops_dma_mapobject(handle, dip, rdip,
740 740 dma_handle, dmareq, dmao));
741 741 }
742 742
743 743 int
744 744 iommulib_nexdma_unmapobject(dev_info_t *dip, dev_info_t *rdip,
745 745 ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
746 746 {
747 747 iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
748 748 iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
749 749
750 750 return (unitp->ilu_ops->ilops_dma_unmapobject(handle, dip, rdip,
751 751 dma_handle, dmao));
752 752 }
753 753
754 754 /* Utility routines invoked by IOMMU drivers */
755 755 int
756 756 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
757 757 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
758 758 ddi_dma_handle_t *handlep)
759 759 {
760 760 iommulib_nexops_t *nexops;
761 761
762 762 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
763 763 return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
764 764 handlep));
765 765 }
766 766
767 767 int
768 768 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
769 769 ddi_dma_handle_t handle)
770 770 {
771 771 iommulib_nexops_t *nexops;
772 772
773 773 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
774 774 ASSERT(nexops);
775 775 return (nexops->nops_dma_freehdl(dip, rdip, handle));
776 776 }
777 777
778 778 int
779 779 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
780 780 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
781 781 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
782 782 {
783 783 iommulib_nexops_t *nexops;
784 784
785 785 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
786 786 return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
787 787 cookiep, ccountp));
788 788 }
789 789
790 790 int
791 791 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
792 792 ddi_dma_handle_t handle)
793 793 {
794 794 iommulib_nexops_t *nexops;
795 795
796 796 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
797 797 return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
798 798 }
799 799
800 800 void
801 801 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
802 802 {
803 803 iommulib_nexops_t *nexops;
804 804
805 805 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
806 806 nexops->nops_dma_reset_cookies(dip, handle);
807 807 }
808 808
809 809 int
810 810 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
811 811 ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
812 812 {
813 813 iommulib_nexops_t *nexops;
814 814
815 815 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
816 816 return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
817 817 }
818 818
819 819 int
820 820 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
821 821 ddi_dma_cookie_t *cookiep, uint_t ccount)
822 822 {
823 823 iommulib_nexops_t *nexops;
824 824
825 825 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
826 826 return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
827 827 }
828 828
829 829 int
830 830 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
831 831 {
832 832 iommulib_nexops_t *nexops;
833 833
834 834 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
835 835 return (nexops->nops_dma_clear_cookies(dip, handle));
836 836 }
837 837
838 838 int
839 839 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
840 840 {
841 841 iommulib_nexops_t *nexops;
842 842
843 843 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
844 844 return (nexops->nops_dma_get_sleep_flags(handle));
845 845 }
846 846
847 847 int
848 848 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
849 849 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
850 850 {
851 851 iommulib_nexops_t *nexops;
852 852
853 853 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
854 854 return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
855 855 cache_flags));
856 856 }
857 857
858 858 int
859 859 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
860 860 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
861 861 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
862 862 {
863 863 iommulib_nexops_t *nexops;
864 864
865 865 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
866 866 return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
867 867 cookiep, ccountp));
868 868 }
869 869
870 870 int
871 871 iommulib_iommu_dmahdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
872 872 ddi_dma_handle_t handle, void *priv)
873 873 {
874 874 iommulib_nexops_t *nexops;
875 875
876 876 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
877 877 return (nexops->nops_dmahdl_setprivate(dip, rdip, handle, priv));
878 878 }
879 879
880 880 void *
881 881 iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
882 882 ddi_dma_handle_t handle)
883 883 {
884 884 iommulib_nexops_t *nexops;
885 885
886 886 nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
887 887 return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
888 888 }
889 889
890 890 int
891 891 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
892 892 {
893 893 iommulib_unit_t *unitp;
894 894 uint64_t unitid;
895 895
896 896 unitp = (iommulib_unit_t *)handle;
897 897
898 898 ASSERT(unitp);
899 899 ASSERT(unitidp);
900 900
901 901 mutex_enter(&unitp->ilu_lock);
902 902 unitid = unitp->ilu_unitid;
903 903 mutex_exit(&unitp->ilu_lock);
904 904
905 905 ASSERT(unitid > 0);
906 906 *unitidp = (uint64_t)unitid;
907 907
908 908 return (DDI_SUCCESS);
909 909 }
910 910
911 911 dev_info_t *
912 912 iommulib_iommu_getdip(iommulib_handle_t handle)
913 913 {
914 914 iommulib_unit_t *unitp;
915 915 dev_info_t *dip;
916 916
917 917 unitp = (iommulib_unit_t *)handle;
918 918
919 919 ASSERT(unitp);
920 920
921 921 mutex_enter(&unitp->ilu_lock);
922 922 dip = unitp->ilu_dip;
923 923 ASSERT(dip);
924 924 ndi_hold_devi(dip);
925 925 mutex_exit(&unitp->ilu_lock);
926 926
927 927 return (dip);
928 928 }
929 929
930 930 iommulib_ops_t *
931 931 iommulib_iommu_getops(iommulib_handle_t handle)
932 932 {
933 933 iommulib_unit_t *unitp;
934 934 iommulib_ops_t *ops;
935 935
936 936 unitp = (iommulib_unit_t *)handle;
937 937
938 938 ASSERT(unitp);
939 939
940 940 mutex_enter(&unitp->ilu_lock);
941 941 ops = unitp->ilu_ops;
942 942 mutex_exit(&unitp->ilu_lock);
943 943
944 944 ASSERT(ops);
945 945
946 946 return (ops);
947 947 }
948 948
949 949 void *
950 950 iommulib_iommu_getdata(iommulib_handle_t handle)
951 951 {
952 952 iommulib_unit_t *unitp;
953 953 void *data;
954 954
955 955 unitp = (iommulib_unit_t *)handle;
956 956
957 957 ASSERT(unitp);
958 958
959 959 mutex_enter(&unitp->ilu_lock);
960 960 data = unitp->ilu_data;
961 961 mutex_exit(&unitp->ilu_lock);
962 962
963 963 ASSERT(data);
964 964
965 965 return (data);
966 966 }
(869 unchanged lines elided)