Print this page
PANKOVs restructure
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
+++ new/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 #include <sys/sunddi.h>
27 27 #include <sys/sunndi.h>
28 -#include <sys/acpi/acpi.h>
28 +#include <acpica/include/acpi.h>
29 29 #include <sys/acpica.h>
30 30 #include <sys/amd_iommu.h>
31 31 #include <sys/bootconf.h>
32 32 #include <sys/sysmacros.h>
33 33 #include <sys/ddidmareq.h>
34 34
35 35 #include "amd_iommu_impl.h"
36 36 #include "amd_iommu_acpi.h"
37 37 #include "amd_iommu_page_tables.h"
38 38
/*
 * DMA attributes for IO page table allocations.  Page tables are
 * allocated through the DDI DMA interfaces so the IOMMU hardware can
 * walk them directly; they must be 4K aligned.
 */
ddi_dma_attr_t amd_iommu_pgtable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)4096,			/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	64,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	64,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/* Hash table of domain structures, indexed by hash_domain(domainid) */
static amd_iommu_domain_t **amd_iommu_domain_table;

/*
 * Cache of previously allocated page table structures, so the full
 * DMA handle/memory/bind cycle can be skipped on reuse.
 */
static struct {
	int f_count;			/* number of entries on f_list */
	amd_iommu_page_table_t *f_list;	/* singly linked via pt_next */
} amd_iommu_pgtable_freelist;

/* Non-zero disables the page table freelist (debug/tuning knob) */
int amd_iommu_no_pgtable_freelist;
61 61
62 62 /*ARGSUSED*/
63 63 static int
64 64 amd_iommu_get_src_bdf(amd_iommu_t *iommu, int32_t bdf, int32_t *src_bdfp)
65 65 {
66 66 amd_iommu_acpi_ivhd_t *hinfop;
67 67
68 68 hinfop = amd_iommu_lookup_ivhd(bdf);
69 69 if (hinfop == NULL) {
70 70 if (bdf == -1) {
71 71 *src_bdfp = bdf;
72 72 } else {
73 73 cmn_err(CE_WARN, "No IVHD entry for 0x%x", bdf);
74 74 return (DDI_FAILURE);
75 75 }
76 76 } else if (hinfop->ach_src_deviceid == -1) {
77 77 *src_bdfp = bdf;
78 78 } else {
79 79 *src_bdfp = hinfop->ach_src_deviceid;
80 80 }
81 81
82 82 return (DDI_SUCCESS);
83 83 }
84 84
85 85 /*ARGSUSED*/
86 86 static int
87 87 amd_iommu_get_domain(amd_iommu_t *iommu, dev_info_t *rdip, int alias,
88 88 uint16_t deviceid, domain_id_t *domainid, const char *path)
89 89 {
90 90 const char *f = "amd_iommu_get_domain";
91 91
92 92 *domainid = AMD_IOMMU_INVALID_DOMAIN;
93 93
94 94 ASSERT(strcmp(ddi_driver_name(rdip), "agpgart") != 0);
95 95
96 96 switch (deviceid) {
97 97 case AMD_IOMMU_INVALID_DOMAIN:
98 98 case AMD_IOMMU_IDENTITY_DOMAIN:
99 99 case AMD_IOMMU_PASSTHRU_DOMAIN:
100 100 case AMD_IOMMU_SYS_DOMAIN:
101 101 *domainid = AMD_IOMMU_SYS_DOMAIN;
102 102 break;
103 103 default:
104 104 *domainid = deviceid;
105 105 break;
106 106 }
107 107
108 108 if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
109 109 cmn_err(CE_NOTE, "%s: domainid for %s = %d",
110 110 f, path, *domainid);
111 111 }
112 112
113 113 return (DDI_SUCCESS);
114 114 }
115 115
116 116 static uint16_t
117 117 hash_domain(domain_id_t domainid)
118 118 {
119 119 return (domainid % AMD_IOMMU_DOMAIN_HASH_SZ);
120 120 }
121 121
122 122 /*ARGSUSED*/
123 123 void
124 124 amd_iommu_init_page_tables(amd_iommu_t *iommu)
125 125 {
126 126 amd_iommu_domain_table = kmem_zalloc(
127 127 sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ, KM_SLEEP);
128 128 }
129 129
130 130 /*ARGSUSED*/
131 131 void
132 132 amd_iommu_fini_page_tables(amd_iommu_t *iommu)
133 133 {
134 134 if (amd_iommu_domain_table) {
135 135 kmem_free(amd_iommu_domain_table,
136 136 sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ);
137 137 amd_iommu_domain_table = NULL;
138 138 }
139 139 }
140 140
/*
 * Look up the domain structure for "domainid", creating it if it does
 * not exist yet.  A newly created domain of type AMD_IOMMU_VMEM_MAP
 * also gets a vmem arena spanning [MMU_PAGESIZE, 4G) for DVMA
 * allocation (page 0 is deliberately excluded).  Returns NULL only on
 * allocation failure.
 */
static amd_iommu_domain_t *
amd_iommu_lookup_domain(amd_iommu_t *iommu, domain_id_t domainid,
    map_type_t type, int km_flags)
{
	uint16_t idx;
	amd_iommu_domain_t *dp;
	char name[AMD_IOMMU_VMEM_NAMELEN+1];

	ASSERT(amd_iommu_domain_table);

	idx = hash_domain(domainid);

	/* Fast path: the domain already exists in the hash table */
	for (dp = amd_iommu_domain_table[idx]; dp; dp = dp->d_next) {
		if (dp->d_domainid == domainid)
			return (dp);
	}

	/* Creating a new domain requires a valid mapping type */
	ASSERT(type != AMD_IOMMU_INVALID_MAP);

	dp = kmem_zalloc(sizeof (*dp), km_flags);
	if (dp == NULL)
		return (NULL);
	dp->d_domainid = domainid;
	dp->d_pgtable_root_4K = 0;	/* make this explicit */

	if (type == AMD_IOMMU_VMEM_MAP) {
		uint64_t base;
		uint64_t size;
		(void) snprintf(name, sizeof (name), "dvma_idx%d_domain%d",
		    iommu->aiomt_idx, domainid);
		/* Start at MMU_PAGESIZE so DVMA 0 is never handed out */
		base = MMU_PAGESIZE;
		size = AMD_IOMMU_SIZE_4G - MMU_PAGESIZE;
		dp->d_vmem = vmem_create(name, (void *)(uintptr_t)base, size,
		    MMU_PAGESIZE, NULL, NULL, NULL, 0,
		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (dp->d_vmem == NULL) {
			kmem_free(dp, sizeof (*dp));
			return (NULL);
		}
	} else {
		dp->d_vmem = NULL;
	}

	/* Insert at the head of the hash chain */
	dp->d_next = amd_iommu_domain_table[idx];
	dp->d_prev = NULL;
	amd_iommu_domain_table[idx] = dp;
	if (dp->d_next)
		dp->d_next->d_prev = dp;
	dp->d_ref = 0;


	return (dp);
}
194 194
195 195 static void
196 196 amd_iommu_teardown_domain(amd_iommu_t *iommu, amd_iommu_domain_t *dp)
197 197 {
198 198 uint16_t idx;
199 199 int flags;
200 200 amd_iommu_cmdargs_t cmdargs = {0};
201 201 domain_id_t domainid = dp->d_domainid;
202 202 const char *f = "amd_iommu_teardown_domain";
203 203
204 204 ASSERT(dp->d_ref == 0);
205 205
206 206 idx = hash_domain(dp->d_domainid);
207 207
208 208 if (dp->d_prev == NULL)
209 209 amd_iommu_domain_table[idx] = dp->d_next;
210 210 else
211 211 dp->d_prev->d_next = dp->d_next;
212 212
213 213 if (dp->d_next)
214 214 dp->d_next->d_prev = dp->d_prev;
215 215
216 216 if (dp->d_vmem != NULL) {
217 217 vmem_destroy(dp->d_vmem);
218 218 dp->d_vmem = NULL;
219 219 }
220 220
221 221 kmem_free(dp, sizeof (*dp));
222 222
223 223 cmdargs.ca_domainid = (uint16_t)domainid;
224 224 cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
225 225 flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
226 226 AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
227 227
228 228 if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
229 229 &cmdargs, flags, 0) != DDI_SUCCESS) {
230 230 cmn_err(CE_WARN, "%s: idx=%d: domainid=%d"
231 231 "Failed to invalidate domain in IOMMU HW cache",
232 232 f, iommu->aiomt_idx, cmdargs.ca_domainid);
233 233 }
234 234 }
235 235
/*
 * Determine the deviceid (source BDF) the IOMMU uses for "rdip", and
 * whether that deviceid is an alias (differs from the device's own
 * BDF).  rdip == NULL denotes a special device whose deviceid comes
 * from the IVHD entry looked up with bdf == -1.
 *
 * Returns DDI_SUCCESS, or DDI_DMA_NOMAPPING on any lookup failure.
 */
static int
amd_iommu_get_deviceid(amd_iommu_t *iommu, dev_info_t *rdip, int32_t *deviceid,
    int *aliasp, const char *path)
{
	int bus = -1;
	int device = -1;
	int func = -1;
	uint16_t bdf;
	int32_t src_bdf;
	dev_info_t *idip = iommu->aiomt_dip;
	const char *driver = ddi_driver_name(idip);
	int instance = ddi_get_instance(idip);
	dev_info_t *pci_dip;
	const char *f = "amd_iommu_get_deviceid";

	/* be conservative. Always assume an alias */
	*aliasp = 1;
	*deviceid = 0;

	/* Check for special devices (rdip == NULL) */
	if (rdip == NULL) {
		if (amd_iommu_get_src_bdf(iommu, -1, &src_bdf) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s: %s%d: idx=%d, failed to get SRC BDF "
			    "for special-device",
			    f, driver, instance, iommu->aiomt_idx);
			return (DDI_DMA_NOMAPPING);
		}
		*deviceid = src_bdf;
		*aliasp = 1;
		return (DDI_SUCCESS);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: attempting to get deviceid for %s",
		    f, path);
	}

	/* Get the PCI dip for rdip (held; released below) */
	pci_dip = amd_iommu_pci_dip(rdip, path);
	if (pci_dip == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
		    "for rdip=%p, path = %s",
		    f, driver, instance, iommu->aiomt_idx, (void *)rdip,
		    path);
		return (DDI_DMA_NOMAPPING);
	}

	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
		ndi_rele_devi(pci_dip);
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get BDF for "
		    "PCI dip (%p). rdip path = %s",
		    f, driver, instance, iommu->aiomt_idx,
		    (void *)pci_dip, path);
		return (DDI_DMA_NOMAPPING);
	}

	ndi_rele_devi(pci_dip);

	/* Each component must be non-negative and fit in 8 bits */
	if (bus > UINT8_MAX || bus < 0 ||
	    device > UINT8_MAX || device < 0 ||
	    func > UINT8_MAX || func < 0) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, invalid BDF(%d,%d,%d) "
		    "for PCI dip (%p). rdip path = %s", f, driver, instance,
		    iommu->aiomt_idx,
		    bus, device, func,
		    (void *)pci_dip, path);
		return (DDI_DMA_NOMAPPING);
	}

	/* Pack bus[15:8], device[7:3], function[2:0] */
	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;

	/* Map the BDF to its source BDF via the ACPI IVHD tables */
	if (amd_iommu_get_src_bdf(iommu, bdf, &src_bdf) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get SRC BDF "
		    "for PCI dip (%p) rdip path = %s.",
		    f, driver, instance, iommu->aiomt_idx, (void *)pci_dip,
		    path);
		return (DDI_DMA_NOMAPPING);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: Deviceid = %u for path = %s",
		    f, src_bdf, path);
	}

	*deviceid = src_bdf;
	*aliasp = (src_bdf != bdf);

	return (DDI_SUCCESS);
}
325 325
/*
 * Fill in the 4 x 64-bit device table entry "devtbl_entry" so that it
 * points at domain dp's page table root.  No-op (returns 0) when the
 * entry is already passthru (V=0, TV=0) or already maps this domain
 * (V=1, TV=1 — verified by ASSERTs).  Returns 1 when the entry was
 * actually written, in which case the domain refcount is bumped and
 * the caller must invalidate the IOMMU's cached copy.
 */
/*ARGSUSED*/
static int
init_devtbl(amd_iommu_t *iommu, uint64_t *devtbl_entry, domain_id_t domainid,
    amd_iommu_domain_t *dp)
{
	uint64_t entry[4] = {0};
	int i;

	/* If already passthru, don't touch */
	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 0 &&
	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
		return (0);
	}

	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 1 &&
	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 1) {

		/* Entry is live: it must already describe this domain */
		ASSERT(dp->d_pgtable_root_4K ==
		    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
		    AMD_IOMMU_DEVTBL_ROOT_PGTBL));

		ASSERT(dp->d_domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
		    AMD_IOMMU_DEVTBL_DOMAINID));

		return (0);
	}

	/* New devtbl entry for this domain. Bump up the domain ref-count */
	dp->d_ref++;

	/* Build the entry in a local copy first */
	entry[3] = 0;
	entry[2] = 0;
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_EX, 1);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SD, 0);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_CACHE, 0);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOCTL, 1);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SA, 0);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SE, 1);
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_DOMAINID,
	    (uint16_t)domainid);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IW, 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IR, 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL,
	    dp->d_pgtable_root_4K);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_PG_MODE,
	    AMD_IOMMU_PGTABLE_MAXLEVEL);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_TV,
	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_V,
	    domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);

	/*
	 * Copy words 1-3 into place before word 0; word 0 carries the
	 * V/TV bits, so the entry only becomes live once fully formed.
	 */
	for (i = 1; i < 4; i++) {
		devtbl_entry[i] = entry[i];
	}
	devtbl_entry[0] = entry[0];

	/* we did an actual init */
	return (1);
}
385 385
/*
 * Put the device table entry for "rdip" (or the special device when
 * rdip == NULL) into passthru mode by clearing its V (valid) bit, and
 * invalidate the IOMMU's cached copy of the entry.  Entries that are
 * already passthru (V=0,TV=0), carry active translations (V=1,TV=1),
 * or are in an invalid state (V=0,TV=1) are left untouched.
 */
void
amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip)
{
	int32_t deviceid;
	int alias;
	uint64_t *devtbl_entry;
	amd_iommu_cmdargs_t cmdargs = {0};
	char *path;
	int pathfree;		/* 1 if path was kmem_alloc'ed by us */
	int V;
	int TV;
	int instance;
	const char *driver;
	const char *f = "amd_iommu_set_passthru";

	if (rdip) {
		driver = ddi_driver_name(rdip);
		instance = ddi_get_instance(rdip);
	} else {
		driver = "special-device";
		instance = 0;
	}

	/* The path is only used for diagnostics; tolerate alloc failure */
	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		if (rdip)
			(void) ddi_pathname(rdip, path);
		else
			(void) strcpy(path, "special-device");
		pathfree = 1;
	} else {
		pathfree = 0;
		path = "<path-mem-alloc-failed>";
	}

	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
		    "Failed to get device ID for device %s.", f, driver,
		    instance,
		    iommu->aiomt_idx, (void *)rdip, path);
		goto out;
	}

	/* No deviceid */
	if (deviceid == -1) {
		goto out;
	}

	/* Bounds-check the deviceid against the device table size */
	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
	    iommu->aiomt_devtbl_sz) {
		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
		    "for rdip (%p) exceeds device table size (%u), path=%s",
		    f, driver,
		    instance, iommu->aiomt_idx, deviceid, (void *)rdip,
		    iommu->aiomt_devtbl_sz, path);
		goto out;
	}

	/*LINTED*/
	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	V = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V);
	TV = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV);

	/* Already passthru */
	if (V == 0 && TV == 0) {
		goto out;
	}

	/* Existing translations: leave them alone */
	if (V == 1 && TV == 1) {
		goto out;
	}

	/* Invalid setting */
	if (V == 0 && TV == 1) {
		goto out;
	}

	/* Remaining case is V == 1 && TV == 0: clear V for passthru */
	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 0);

	/* Drop any cached copy of the old entry; best effort */
	cmdargs.ca_deviceid = (uint16_t)deviceid;
	(void) amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0);

out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
}
477 477
478 478 static int
479 479 amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
480 480 domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
481 481 const char *path)
482 482 {
483 483 uint64_t *devtbl_entry;
484 484 amd_iommu_cmdargs_t cmdargs = {0};
485 485 int error, flags;
486 486 dev_info_t *idip = iommu->aiomt_dip;
487 487 const char *driver = ddi_driver_name(idip);
488 488 int instance = ddi_get_instance(idip);
489 489 const char *f = "amd_iommu_set_devtbl_entry";
490 490
491 491 if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
492 492 cmn_err(CE_NOTE, "%s: attempting to set devtbl entry for %s",
493 493 f, path);
494 494 }
495 495
496 496 if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
497 497 iommu->aiomt_devtbl_sz) {
498 498 cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
499 499 "for rdip (%p) exceeds device table size (%u), path=%s",
500 500 f, driver,
501 501 instance, iommu->aiomt_idx, deviceid, (void *)rdip,
502 502 iommu->aiomt_devtbl_sz, path);
503 503 return (DDI_DMA_NOMAPPING);
504 504 }
505 505
506 506 /*LINTED*/
507 507 devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
508 508 [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
509 509
510 510 if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
511 511 cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
512 512 f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
513 513 }
514 514
515 515 /*
516 516 * Flush internal caches, need to do this if we came up from
517 517 * fast boot
518 518 */
519 519 cmdargs.ca_deviceid = deviceid;
520 520 error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
521 521 &cmdargs, 0, 0);
522 522 if (error != DDI_SUCCESS) {
523 523 cmn_err(CE_WARN, "%s: idx=%d: deviceid=%d"
524 524 "Failed to invalidate domain in IOMMU HW cache",
525 525 f, iommu->aiomt_idx, deviceid);
526 526 return (error);
527 527 }
528 528
529 529 cmdargs.ca_domainid = (uint16_t)domainid;
530 530 cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
531 531 flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
532 532 AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
533 533
534 534 error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
535 535 &cmdargs, flags, 0);
536 536 if (error != DDI_SUCCESS) {
537 537 cmn_err(CE_WARN, "%s: idx=%d: domainid=%d"
538 538 "Failed to invalidate translations in IOMMU HW cache",
539 539 f, iommu->aiomt_idx, cmdargs.ca_domainid);
540 540 return (error);
541 541 }
542 542
543 543 /* Initialize device table entry */
544 544 if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
545 545 cmdargs.ca_deviceid = deviceid;
546 546 error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
547 547 &cmdargs, 0, 0);
548 548 }
549 549
550 550 return (error);
551 551 }
552 552
/*
 * Remove the translation for "deviceid": clear the TV bit and the
 * page table root pointer in the device table entry while keeping
 * V=1, drop the domain's reference, and invalidate the IOMMU's cached
 * copy of the entry.  *domain_freed is set to 1 when the domain's
 * refcount reaches zero, signalling the caller that the domain may be
 * torn down.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
    int *domain_freed, char *path)
{
	uint64_t *devtbl_entry;
	int error = DDI_SUCCESS;
	amd_iommu_cmdargs_t cmdargs = {0};
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "amd_iommu_clear_devtbl_entry";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: attempting to clear devtbl entry for "
		    "domainid = %d, deviceid = %u, path = %s",
		    f, domainid, deviceid, path);
	}

	/* Bounds-check the deviceid against the device table size */
	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
	    iommu->aiomt_devtbl_sz) {
		cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
		    "for rdip (%p) exceeds device table size (%u), path = %s",
		    f, driver, instance,
		    iommu->aiomt_idx, deviceid, (void *)rdip,
		    iommu->aiomt_devtbl_sz, path);
		return (DDI_FAILURE);
	}

	/*LINTED*/
	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
	}

	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
		/* Nothing to do */
		return (DDI_SUCCESS);
	}

	/* The entry must currently map this domain's page table root */
	ASSERT(dp->d_pgtable_root_4K == AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_ROOT_PGTBL));

	ASSERT(domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
	    AMD_IOMMU_DEVTBL_DOMAINID));

	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV, 0);
	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL, 0);
	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 1);

	/* Push the updated entry out to memory before notifying HW */
	SYNC_FORDEV(iommu->aiomt_dmahdl);

	dp->d_ref--;
	ASSERT(dp->d_ref >= 0);

	if (dp->d_ref == 0) {
		*domain_freed = 1;
	}

	/* Invalidate the IOMMU's cached copy of this entry */
	cmdargs.ca_deviceid = deviceid;
	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0);
	if (error != DDI_SUCCESS)
		error = DDI_FAILURE;

	return (error);
}
622 622
623 623 int
624 624 amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt)
625 625 {
626 626 ampt->ampt_hash = kmem_zalloc(sizeof (amd_iommu_page_table_t *) *
627 627 AMD_IOMMU_PGTABLE_HASH_SZ, KM_SLEEP);
628 628 return (DDI_SUCCESS);
629 629 }
630 630
631 631 void
632 632 amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt)
633 633 {
634 634 kmem_free(ampt->ampt_hash,
635 635 sizeof (amd_iommu_page_table_t *) * AMD_IOMMU_PGTABLE_HASH_SZ);
636 636 ampt->ampt_hash = NULL;
637 637 }
638 638
639 639 static uint32_t
640 640 pt_hashfn(uint64_t pa_4K)
641 641 {
642 642 return (pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ);
643 643 }
644 644
645 645 static void
646 646 amd_iommu_insert_pgtable_hash(amd_iommu_page_table_t *pt)
647 647 {
648 648 uint64_t pa_4K = ((uint64_t)pt->pt_cookie.dmac_cookie_addr) >> 12;
649 649 uint32_t idx = pt_hashfn(pa_4K);
650 650
651 651 ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
652 652
653 653 mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
654 654
655 655 pt->pt_next = amd_iommu_page_table_hash.ampt_hash[idx];
656 656 pt->pt_prev = NULL;
657 657 amd_iommu_page_table_hash.ampt_hash[idx] = pt;
658 658 if (pt->pt_next)
659 659 pt->pt_next->pt_prev = pt;
660 660
661 661 mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
662 662 }
663 663
664 664 static void
665 665 amd_iommu_remove_pgtable_hash(amd_iommu_page_table_t *pt)
666 666 {
667 667 uint64_t pa_4K = (pt->pt_cookie.dmac_cookie_addr >> 12);
668 668 uint32_t idx = pt_hashfn(pa_4K);
669 669
670 670 ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
671 671
672 672 mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
673 673
674 674 if (pt->pt_next)
675 675 pt->pt_next->pt_prev = pt->pt_prev;
676 676
677 677 if (pt->pt_prev)
678 678 pt->pt_prev->pt_next = pt->pt_next;
679 679 else
680 680 amd_iommu_page_table_hash.ampt_hash[idx] = pt->pt_next;
681 681
682 682 pt->pt_next = NULL;
683 683 pt->pt_prev = NULL;
684 684
685 685 mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
686 686 }
687 687
688 688 static amd_iommu_page_table_t *
689 689 amd_iommu_lookup_pgtable_hash(domain_id_t domainid, uint64_t pgtable_pa_4K)
690 690 {
691 691 amd_iommu_page_table_t *pt;
692 692 uint32_t idx = pt_hashfn(pgtable_pa_4K);
693 693
694 694 mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
695 695 pt = amd_iommu_page_table_hash.ampt_hash[idx];
696 696 for (; pt; pt = pt->pt_next) {
697 697 if (domainid != pt->pt_domainid)
698 698 continue;
699 699 ASSERT((pt->pt_cookie.dmac_cookie_addr &
700 700 AMD_IOMMU_PGTABLE_ALIGN) == 0);
701 701 if ((pt->pt_cookie.dmac_cookie_addr >> 12) == pgtable_pa_4K) {
702 702 break;
703 703 }
704 704 }
705 705 mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
706 706
707 707 return (pt);
708 708 }
709 709
/*
 * Find the child page table reached from parent "ppt" through the PDE
 * at "index".  At the root level (AMD_IOMMU_PGTABLE_MAXLEVEL) there is
 * no parent; the lookup starts from the domain's page table root.
 * Returns NULL when the PDE is not present or no matching table is in
 * the global page table hash.
 */
/*ARGSUSED*/
static amd_iommu_page_table_t *
amd_iommu_lookup_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *ppt,
    amd_iommu_domain_t *dp, int level, uint16_t index)
{
	uint64_t *pdtep;
	uint64_t pgtable_pa_4K;

	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
	ASSERT(dp);

	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		/* Root level: no parent table, index is unused */
		ASSERT(ppt == NULL);
		ASSERT(index == 0);
		pgtable_pa_4K = dp->d_pgtable_root_4K;
	} else {
		ASSERT(ppt);
		pdtep = &(ppt->pt_pgtblva[index]);
		/* A non-present PDE means no child table exists */
		if (AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_PR) == 0) {
			if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
				cmn_err(CE_NOTE, "Skipping PR=0 pdte: 0x%"
				    PRIx64, *pdtep);
			}
			return (NULL);
		}
		pgtable_pa_4K = AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_ADDR);
	}

	return (amd_iommu_lookup_pgtable_hash(dp->d_domainid, pgtable_pa_4K));
}
740 740
741 741 static amd_iommu_page_table_t *
742 742 amd_iommu_alloc_from_freelist(void)
743 743 {
744 744 int i;
745 745 uint64_t *pte_array;
746 746 amd_iommu_page_table_t *pt;
747 747
748 748 if (amd_iommu_no_pgtable_freelist == 1)
749 749 return (NULL);
750 750
751 751 if (amd_iommu_pgtable_freelist.f_count == 0)
752 752 return (NULL);
753 753
754 754 pt = amd_iommu_pgtable_freelist.f_list;
755 755 amd_iommu_pgtable_freelist.f_list = pt->pt_next;
756 756 amd_iommu_pgtable_freelist.f_count--;
757 757
758 758 pte_array = pt->pt_pgtblva;
759 759 for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
760 760 ASSERT(pt->pt_pte_ref[i] == 0);
761 761 ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
762 762 AMD_IOMMU_PTDE_PR) == 0);
763 763 }
764 764
765 765 return (pt);
766 766 }
767 767
/*
 * Allocate a 4K IO page table for "domainid": either recycle one from
 * the freelist or allocate it fresh through the DDI DMA interfaces
 * (handle alloc, memory alloc, bind).  The table is zeroed, synced
 * for the device and inserted into the global page table hash; on
 * success *ptp points at it.
 *
 * Returns DDI_SUCCESS, DDI_DMA_NORESOURCES or DDI_DMA_NOMAPPING.
 */
static int
amd_iommu_alloc_pgtable(amd_iommu_t *iommu, domain_id_t domainid,
    const char *path, amd_iommu_page_table_t **ptp, int km_flags)
{
	int err;
	uint_t ncookies;
	amd_iommu_page_table_t *pt;
	dev_info_t *idip = iommu->aiomt_dip;
	const char *driver = ddi_driver_name(idip);
	int instance = ddi_get_instance(idip);
	const char *f = "amd_iommu_alloc_pgtable";

	*ptp = NULL;

	/* Fast path: reuse a cached table from the freelist */
	pt = amd_iommu_alloc_from_freelist();
	if (pt)
		goto init_pgtable;

	pt = kmem_zalloc(sizeof (amd_iommu_page_table_t), km_flags);
	if (pt == NULL)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Each page table is 4K in size
	 */
	pt->pt_mem_reqsz = AMD_IOMMU_PGTABLE_SZ;

	/*
	 * Alloc a DMA handle. Use the IOMMU dip as we want this DMA
	 * to *not* enter the IOMMU - no recursive entrance.
	 */
	err = ddi_dma_alloc_handle(idip, &amd_iommu_pgtable_dma_attr,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, &pt->pt_dma_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
		    "Cannot alloc DMA handle for IO Page Table",
		    f, driver, instance, domainid, path);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
	}

	/*
	 * Alloc memory for IO Page Table.
	 * XXX remove size_t cast kludge
	 */
	err = ddi_dma_mem_alloc(pt->pt_dma_hdl, pt->pt_mem_reqsz,
	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, (caddr_t *)&pt->pt_pgtblva,
	    (size_t *)&pt->pt_mem_realsz, &pt->pt_mem_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
		    "Cannot allocate DMA memory for IO Page table",
		    f, driver, instance, domainid, path);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (DDI_DMA_NORESOURCES);
	}

	/*
	 * The Page table DMA VA must be 4K aligned and
	 * size >= than requested memory.
	 *
	 */
	ASSERT(((uint64_t)(uintptr_t)pt->pt_pgtblva & AMD_IOMMU_PGTABLE_ALIGN)
	    == 0);
	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);

	/*
	 * Now bind the handle
	 */
	err = ddi_dma_addr_bind_handle(pt->pt_dma_hdl, NULL,
	    (caddr_t)pt->pt_pgtblva, pt->pt_mem_realsz,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, &pt->pt_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
		    "Cannot bind memory for DMA to IO Page Tables. "
		    "bufrealsz=%p",
		    f, driver, instance, domainid, path,
		    (void *)(uintptr_t)pt->pt_mem_realsz);
		ddi_dma_mem_free(&pt->pt_mem_hdl);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (err == DDI_DMA_PARTIAL_MAP ? DDI_DMA_NOMAPPING :
		    err);
	}

	/*
	 * We assume the DMA engine on the IOMMU is capable of handling the
	 * whole page table in a single cookie. If not and multiple cookies
	 * are needed we fail.
	 */
	if (ncookies != 1) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path=%s "
		    "Cannot handle multiple "
		    "cookies for DMA to IO page Table, #cookies=%u",
		    f, driver, instance, domainid, path, ncookies);
		(void) ddi_dma_unbind_handle(pt->pt_dma_hdl);
		ddi_dma_mem_free(&pt->pt_mem_hdl);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (DDI_DMA_NOMAPPING);
	}

init_pgtable:
	/*
	 * The address in the cookie must be 4K aligned and >= table size
	 */
	ASSERT(pt->pt_cookie.dmac_cookie_addr != NULL);
	ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_realsz);
	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);
	ASSERT(pt->pt_mem_reqsz >= AMD_IOMMU_PGTABLE_SIZE);
	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
	ASSERT(pt->pt_pgtblva);

	/* Start unowned; the caller links the table into a domain */
	pt->pt_domainid = AMD_IOMMU_INVALID_DOMAIN;
	pt->pt_level = 0x7;
	pt->pt_index = 0;
	pt->pt_ref = 0;
	pt->pt_next = NULL;
	pt->pt_prev = NULL;
	pt->pt_parent = NULL;

	/* Clear all entries and make them visible to the device */
	bzero(pt->pt_pgtblva, pt->pt_mem_realsz);
	SYNC_FORDEV(pt->pt_dma_hdl);

	amd_iommu_insert_pgtable_hash(pt);

	*ptp = pt;

	return (DDI_SUCCESS);
}
904 904
905 905 static int
906 906 amd_iommu_move_to_freelist(amd_iommu_page_table_t *pt)
907 907 {
908 908 if (amd_iommu_no_pgtable_freelist == 1)
909 909 return (DDI_FAILURE);
910 910
911 911 if (amd_iommu_pgtable_freelist.f_count ==
912 912 AMD_IOMMU_PGTABLE_FREELIST_MAX)
913 913 return (DDI_FAILURE);
914 914
915 915 pt->pt_next = amd_iommu_pgtable_freelist.f_list;
916 916 amd_iommu_pgtable_freelist.f_list = pt;
917 917 amd_iommu_pgtable_freelist.f_count++;
918 918
919 919 return (DDI_SUCCESS);
920 920 }
921 921
922 922 static void
923 923 amd_iommu_free_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *pt)
924 924 {
925 925 int i;
926 926 uint64_t *pte_array;
927 927 dev_info_t *dip = iommu->aiomt_dip;
928 928 int instance = ddi_get_instance(dip);
929 929 const char *driver = ddi_driver_name(dip);
930 930 const char *f = "amd_iommu_free_pgtable";
931 931
932 932 ASSERT(pt->pt_ref == 0);
933 933
934 934 amd_iommu_remove_pgtable_hash(pt);
935 935
936 936 pte_array = pt->pt_pgtblva;
937 937 for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
938 938 ASSERT(pt->pt_pte_ref[i] == 0);
939 939 ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
940 940 AMD_IOMMU_PTDE_PR) == 0);
941 941 }
942 942
943 943 if (amd_iommu_move_to_freelist(pt) == DDI_SUCCESS)
944 944 return;
945 945
946 946 /* Unbind the handle */
947 947 if (ddi_dma_unbind_handle(pt->pt_dma_hdl) != DDI_SUCCESS) {
948 948 cmn_err(CE_WARN, "%s: %s%d: idx=%d, domainid=%d. "
949 949 "Failed to unbind handle: %p for IOMMU Page Table",
950 950 f, driver, instance, iommu->aiomt_idx, pt->pt_domainid,
951 951 (void *)pt->pt_dma_hdl);
952 952 }
953 953 /* Free the table memory allocated for DMA */
954 954 ddi_dma_mem_free(&pt->pt_mem_hdl);
955 955
956 956 /* Free the DMA handle */
957 957 ddi_dma_free_handle(&pt->pt_dma_hdl);
958 958
959 959 kmem_free(pt, sizeof (amd_iommu_page_table_t));
960 960
961 961 }
962 962
/*
 * Install (or validate) the page-directory entry in parent table 'ppt'
 * that points at child table 'pt'.  The parent's reference count is
 * bumped only when a new PDE is created.  Always returns DDI_SUCCESS.
 */
static int
init_pde(amd_iommu_page_table_t *ppt, amd_iommu_page_table_t *pt)
{
	/* PDE slot in the parent selected by the child's index */
	uint64_t *pdep = &(ppt->pt_pgtblva[pt->pt_index]);
	/* child table's DMA (physical) address in units of 4K pages */
	uint64_t next_pgtable_pa_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;

	/* nothing to set. PDE is already set */
	if (AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1) {
		ASSERT(PT_REF_VALID(ppt));
		ASSERT(PT_REF_VALID(pt));
		ASSERT(ppt->pt_pte_ref[pt->pt_index] == 0);
		/* an existing PDE must already point at this child */
		ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_ADDR)
		    == next_pgtable_pa_4K);
		return (DDI_SUCCESS);
	}

	ppt->pt_ref++;
	ASSERT(PT_REF_VALID(ppt));

	/* Page Directories are always RW */
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IW, 1);
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IR, 1);
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_ADDR,
	    next_pgtable_pa_4K);
	pt->pt_parent = ppt;
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_NXT_LVL,
	    pt->pt_level);
	ppt->pt_pte_ref[pt->pt_index] = 0;
	/* set Present last so the IOMMU never observes a partial PDE */
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_PR, 1);
	SYNC_FORDEV(ppt->pt_dma_hdl);
	ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1);

	return (DDI_SUCCESS);
}
997 997
/*
 * Install (or widen the permissions of) the page-table entry at
 * 'index' in leaf table 'pt', mapping it to physical address 'pa'.
 * The DDI DMA direction flags in 'dmareq' select the IOMMU IR/IW
 * permission bits; note the sense inversion described below.
 * Per-PTE reference counts track shared mappings.  Always returns
 * DDI_SUCCESS.
 */
static int
init_pte(amd_iommu_page_table_t *pt, uint64_t pa, uint16_t index,
    struct ddi_dma_req *dmareq)
{
	uint64_t *ptep = &(pt->pt_pgtblva[index]);
	uint64_t pa_4K = pa >> 12;	/* target PA in 4K-page units */
	int R;
	int W;

	/* nothing to set if PTE is already set */
	if (AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1) {
		/*
		 * Adjust current permissions
		 * DDI_DMA_WRITE means direction of DMA is MEM -> I/O
		 * so that requires Memory READ permissions i.e. sense
		 * is inverted.
		 * Note: either or both of DD_DMA_READ/WRITE may be set
		 */
		if (amd_iommu_no_RW_perms == 0) {
			R = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IR);
			W = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IW);
			if (R == 0 && ((dmareq->dmar_flags & DDI_DMA_WRITE) ||
			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
			}
			if (W == 0 && ((dmareq->dmar_flags & DDI_DMA_READ) ||
			    (dmareq->dmar_flags & DDI_DMA_RDWR))) {
				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
			}
		}
		ASSERT(PT_REF_VALID(pt));
		/* another request now shares this mapping */
		pt->pt_pte_ref[index]++;
		/* a shared PTE must map the same physical page */
		ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR)
		    == pa_4K);
		return (DDI_SUCCESS);
	}

	pt->pt_ref++;
	ASSERT(PT_REF_VALID(pt));

	/* see comment above about inverting sense of RD/WR */
	if (amd_iommu_no_RW_perms == 0) {
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 0);
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 0);
		if (dmareq->dmar_flags & DDI_DMA_RDWR) {
			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
		} else {
			if (dmareq->dmar_flags & DDI_DMA_WRITE) {
				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
			}
			if (dmareq->dmar_flags & DDI_DMA_READ) {
				AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
			}
		}
	} else {
		/* RW-permission checking disabled: grant both */
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
	}

	/* TODO what is correct for FC and U */
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_FC, 0);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_U, 0);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_ADDR, pa_4K);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_NXT_LVL, 0);
	ASSERT(pt->pt_pte_ref[index] == 0);
	pt->pt_pte_ref[index] = 1;
	/* set Present last so the IOMMU never observes a partial PTE */
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_PR, 1);
	SYNC_FORDEV(pt->pt_dma_hdl);
	ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1);

	return (DDI_SUCCESS);
}
1071 1071
1072 1072
1073 1073 static void
1074 1074 init_pt(amd_iommu_page_table_t *pt, amd_iommu_domain_t *dp,
1075 1075 int level, uint16_t index)
1076 1076 {
1077 1077 ASSERT(dp);
1078 1078
1079 1079 if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
1080 1080 dp->d_pgtable_root_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
1081 1081 } else {
1082 1082 ASSERT(level >= 1 && level < AMD_IOMMU_PGTABLE_MAXLEVEL);
1083 1083 }
1084 1084
1085 1085 pt->pt_domainid = dp->d_domainid;
1086 1086 pt->pt_level = level;
1087 1087 pt->pt_index = index;
1088 1088 }
1089 1089
/*
 * Set up a single level of the page-table hierarchy for the mapping
 * va -> pa.  Looks up (or allocates) the table for 'level' under
 * parent 'ppt' at 'index', links it via a PDE, and at the leaf level
 * installs the PTE.  For levels above 1, returns the table and the
 * next level's index through *ptp / *next_idxp so the caller can
 * continue the descent.  Returns DDI_SUCCESS or a DDI error.
 */
static int
amd_iommu_setup_1_pgtable(amd_iommu_t *iommu, dev_info_t *rdip,
    struct ddi_dma_req *dmareq,
    domain_id_t domainid, amd_iommu_domain_t *dp,
    amd_iommu_page_table_t *ppt,
    uint16_t index, int level, uint64_t va, uint64_t pa,
    amd_iommu_page_table_t **ptp, uint16_t *next_idxp, const char *path,
    int km_flags)
{
	int error;
	amd_iommu_page_table_t *pt;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_setup_1_pgtable";

	*ptp = NULL;
	*next_idxp = 0;
	error = DDI_SUCCESS;

	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);

	ASSERT(dp);
	/* the root level has no parent table and always uses index 0 */
	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		ASSERT(ppt == NULL);
		ASSERT(index == 0);
	} else {
		ASSERT(ppt);
	}

	/* Check if page table is already allocated */
	if (pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index)) {
		ASSERT(pt->pt_domainid == domainid);
		ASSERT(pt->pt_level == level);
		ASSERT(pt->pt_index == index);
		goto out;
	}

	if ((error = amd_iommu_alloc_pgtable(iommu, domainid, path, &pt,
	    km_flags)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx = %u, domainid = %d, va = %p "
		    "path = %s", f, driver, instance, iommu->aiomt_idx,
		    domainid, (void *)(uintptr_t)va, path);
		return (error);
	}

	ASSERT(dp->d_domainid == domainid);

	init_pt(pt, dp, level, index);

out:
	/* non-root tables must be reachable via a PDE in the parent */
	if (level != AMD_IOMMU_PGTABLE_MAXLEVEL) {
		error = init_pde(ppt, pt);
	}

	if (level == 1) {
		/* leaf: install the actual translation */
		ASSERT(error == DDI_SUCCESS);
		error = init_pte(pt, pa, AMD_IOMMU_VA_BITS(va, level), dmareq);
	} else {
		/* interior: hand back state for the next level down */
		*next_idxp = AMD_IOMMU_VA_BITS(va, level);
		*ptp = pt;
	}

	return (error);
}
1154 1154
/*
 * Result of tearing down a single PDE/PTE:
 *	PDTE_NOT_TORN     - entry left in place (still referenced, or
 *			    it was unexpectedly not present)
 *	PDTE_TORN_DOWN    - entry cleared; the table still holds others
 *	PGTABLE_TORN_DOWN - entry cleared and the now-empty table freed
 */
typedef enum {
	PDTE_NOT_TORN = 0x1,
	PDTE_TORN_DOWN = 0x2,
	PGTABLE_TORN_DOWN = 0x4
} pdte_tear_t;
1160 1160
/*
 * Tear down the PDE/PTE at 'index' in table 'pt'.  For a leaf table
 * (level 1) the per-entry reference count is decremented first and the
 * entry is only cleared when it reaches zero.  If clearing the entry
 * drops the table's own reference count to zero, the table itself is
 * freed.  Returns a pdte_tear_t describing how far teardown went.
 */
static pdte_tear_t
amd_iommu_teardown_pdte(amd_iommu_t *iommu,
    amd_iommu_page_table_t *pt, int index)
{
	uint8_t next_level;
	pdte_tear_t retval;
	uint64_t *ptdep = &(pt->pt_pgtblva[index]);

	next_level = AMD_IOMMU_REG_GET64(ptdep,
	    AMD_IOMMU_PTDE_NXT_LVL);

	if (AMD_IOMMU_REG_GET64(ptdep, AMD_IOMMU_PTDE_PR) == 1) {
		if (pt->pt_level == 1) {
			ASSERT(next_level == 0);
			/* PTE */
			pt->pt_pte_ref[index]--;
			if (pt->pt_pte_ref[index] != 0) {
				/* mapping still shared by other requests */
				return (PDTE_NOT_TORN);
			}
		} else {
			ASSERT(next_level != 0 && next_level != 7);
		}
		ASSERT(pt->pt_pte_ref[index] == 0);
		ASSERT(PT_REF_VALID(pt));

		/* clear Present first, then make it visible to the HW */
		AMD_IOMMU_REG_SET64(ptdep, AMD_IOMMU_PTDE_PR, 0);
		SYNC_FORDEV(pt->pt_dma_hdl);
		ASSERT(AMD_IOMMU_REG_GET64(ptdep,
		    AMD_IOMMU_PTDE_PR) == 0);
		pt->pt_ref--;
		ASSERT(PT_REF_VALID(pt));
		retval = PDTE_TORN_DOWN;
	} else {
		/* caller asked to tear down an entry that isn't present */
		ASSERT(0);
		ASSERT(pt->pt_pte_ref[index] == 0);
		ASSERT(PT_REF_VALID(pt));
		retval = PDTE_NOT_TORN;
	}

	if (pt->pt_ref == 0) {
		/* last entry gone: release the whole table */
		amd_iommu_free_pgtable(iommu, pt);
		return (PGTABLE_TORN_DOWN);
	}

	return (retval);
}
1207 1207
/*
 * Create (or extend) the page-table hierarchy translating the single
 * page 'va' -> 'pa' for (deviceid, domainid), then install the device
 * table entry.  The passthru domain needs no page tables, only the
 * device table entry.  Returns DDI_SUCCESS or a DDI error code.
 */
static int
amd_iommu_create_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, uint64_t va,
    uint64_t pa, uint16_t deviceid, domain_id_t domainid,
    amd_iommu_domain_t *dp, const char *path, int km_flags)
{
	int level;
	uint16_t index;
	uint16_t next_idx;
	amd_iommu_page_table_t *pt;
	amd_iommu_page_table_t *ppt;
	int error;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_create_pgtables";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
		    "deviceid = %u, va = %p, pa = %p, path = %s",
		    f, driver, instance,
		    iommu->aiomt_idx, domainid, deviceid,
		    (void *)(uintptr_t)va,
		    (void *)(uintptr_t)pa, path);
	}

	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
		/* No need for pagetables. Just set up device table entry */
		goto passthru;
	}

	/*
	 * Walk from the root down to level 1, setting up one table per
	 * level; amd_iommu_setup_1_pgtable() hands back the next table
	 * and index through pt/next_idx for interior levels.
	 */
	index = 0;
	ppt = NULL;
	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0;
	    level--, pt = NULL, next_idx = 0) {
		if ((error = amd_iommu_setup_1_pgtable(iommu, rdip, dmareq,
		    domainid, dp, ppt, index, level, va, pa, &pt,
		    &next_idx, path, km_flags)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
			    "deviceid=%u, va= %p, pa = %p, Failed to setup "
			    "page table(s) at level = %d, path = %s.",
			    f, driver, instance, iommu->aiomt_idx,
			    domainid, deviceid, (void *)(uintptr_t)va,
			    (void *)(uintptr_t)pa, level, path);
			return (error);
		}

		if (level > 1) {
			/* descend: this table becomes the next parent */
			ASSERT(pt);
			ASSERT(pt->pt_domainid == domainid);
			ppt = pt;
			index = next_idx;
		} else {
			/* leaf level: the PTE was installed above */
			ASSERT(level == 1);
			ASSERT(pt == NULL);
			ASSERT(next_idx == 0);
			ppt = NULL;
			index = 0;
		}
	}

passthru:
	if ((error = amd_iommu_set_devtbl_entry(iommu, rdip, domainid, deviceid,
	    dp, path)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, deviceid=%u, "
		    "domainid=%d."
		    "Failed to set device table entry for path %s.",
		    f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, deviceid, domainid, path);
		return (error);
	}

	SYNC_FORDEV(iommu->aiomt_dmahdl);

	return (DDI_SUCCESS);
}
1283 1283
1284 1284 static int
1285 1285 amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
1286 1286 uint64_t pageva, uint16_t deviceid, domain_id_t domainid,
1287 1287 amd_iommu_domain_t *dp, map_type_t type, int *domain_freed, char *path)
1288 1288 {
1289 1289 int level;
1290 1290 int flags;
1291 1291 amd_iommu_cmdargs_t cmdargs = {0};
1292 1292 uint16_t index;
1293 1293 uint16_t prev_index;
1294 1294 amd_iommu_page_table_t *pt;
1295 1295 amd_iommu_page_table_t *ppt;
1296 1296 pdte_tear_t retval;
1297 1297 int tear_level;
1298 1298 int invalidate_pte;
1299 1299 int invalidate_pde;
1300 1300 int error = DDI_FAILURE;
1301 1301 const char *driver = ddi_driver_name(iommu->aiomt_dip);
1302 1302 int instance = ddi_get_instance(iommu->aiomt_dip);
1303 1303 const char *f = "amd_iommu_destroy_pgtables";
1304 1304
1305 1305 if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1306 1306 cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
1307 1307 "deviceid = %u, va = %p, path = %s",
1308 1308 f, driver, instance,
1309 1309 iommu->aiomt_idx, domainid, deviceid,
1310 1310 (void *)(uintptr_t)pageva, path);
1311 1311 }
1312 1312
1313 1313 if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
1314 1314 /*
1315 1315 * there are no pagetables for the passthru domain.
1316 1316 * Just the device table entry
1317 1317 */
1318 1318 error = DDI_SUCCESS;
1319 1319 goto passthru;
1320 1320 }
1321 1321
1322 1322 ppt = NULL;
1323 1323 index = 0;
1324 1324 for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
1325 1325 pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
1326 1326 if (pt) {
1327 1327 ppt = pt;
1328 1328 index = AMD_IOMMU_VA_BITS(pageva, level);
1329 1329 continue;
1330 1330 }
1331 1331 break;
1332 1332 }
1333 1333
1334 1334 if (level == 0) {
1335 1335 uint64_t *ptep;
1336 1336 uint64_t pa_4K;
1337 1337
1338 1338 ASSERT(pt);
1339 1339 ASSERT(pt == ppt);
1340 1340 ASSERT(pt->pt_domainid == dp->d_domainid);
1341 1341
1342 1342 ptep = &(pt->pt_pgtblva[index]);
1343 1343
1344 1344 pa_4K = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR);
1345 1345 if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
1346 1346 ASSERT(pageva == (pa_4K << MMU_PAGESHIFT));
1347 1347 }
1348 1348 }
1349 1349
1350 1350 tear_level = -1;
1351 1351 invalidate_pde = 0;
1352 1352 invalidate_pte = 0;
1353 1353 for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) {
1354 1354 prev_index = pt->pt_index;
1355 1355 ppt = pt->pt_parent;
1356 1356 retval = amd_iommu_teardown_pdte(iommu, pt, index);
1357 1357 switch (retval) {
1358 1358 case PDTE_NOT_TORN:
1359 1359 goto invalidate;
1360 1360 case PDTE_TORN_DOWN:
1361 1361 invalidate_pte = 1;
1362 1362 goto invalidate;
1363 1363 case PGTABLE_TORN_DOWN:
1364 1364 invalidate_pte = 1;
1365 1365 invalidate_pde = 1;
1366 1366 tear_level = level;
1367 1367 break;
1368 1368 }
1369 1369 index = prev_index;
1370 1370 pt = ppt;
1371 1371 }
1372 1372
1373 1373 invalidate:
1374 1374 /*
1375 1375 * Now teardown the IOMMU HW caches if applicable
1376 1376 */
1377 1377 if (invalidate_pte) {
1378 1378 cmdargs.ca_domainid = (uint16_t)domainid;
1379 1379 if (amd_iommu_pageva_inval_all) {
1380 1380 cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
1381 1381 flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
1382 1382 AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
1383 1383 } else if (invalidate_pde) {
1384 1384 cmdargs.ca_addr =
1385 1385 (uintptr_t)AMD_IOMMU_VA_INVAL(pageva, tear_level);
1386 1386 flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
1387 1387 AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
1388 1388 } else {
1389 1389 cmdargs.ca_addr = (uintptr_t)pageva;
1390 1390 flags = 0;
1391 1391 }
1392 1392 if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
1393 1393 &cmdargs, flags, 0) != DDI_SUCCESS) {
1394 1394 cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
1395 1395 "rdip=%p. Failed to invalidate IOMMU HW cache "
1396 1396 "for %s", f, driver, instance,
1397 1397 iommu->aiomt_idx, domainid, (void *)rdip, path);
1398 1398 error = DDI_FAILURE;
1399 1399 goto out;
1400 1400 }
1401 1401 }
1402 1402
1403 1403 passthru:
1404 1404 if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
1405 1405 error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid,
1406 1406 deviceid, dp, domain_freed, path);
1407 1407 } else {
1408 1408 error = DDI_SUCCESS;
1409 1409 }
1410 1410
1411 1411 out:
1412 1412 SYNC_FORDEV(iommu->aiomt_dmahdl);
1413 1413
1414 1414 return (error);
1415 1415 }
1416 1416
1417 1417 static int
1418 1418 cvt_bind_error(int error)
1419 1419 {
1420 1420 switch (error) {
1421 1421 case DDI_DMA_MAPPED:
1422 1422 case DDI_DMA_PARTIAL_MAP:
1423 1423 case DDI_DMA_NORESOURCES:
1424 1424 case DDI_DMA_NOMAPPING:
1425 1425 break;
1426 1426 default:
1427 1427 cmn_err(CE_PANIC, "Unsupported error code: %d", error);
1428 1428 /*NOTREACHED*/
1429 1429 }
1430 1430 return (error);
1431 1431 }
1432 1432
/*
 * Map the physical range [start_pa, start_pa + pa_sz) into the IOMMU
 * virtual (DMA) address space of rdip's domain, building page tables
 * for each page.  For unity mappings VA == PA; otherwise a VA range is
 * carved out of the domain's vmem arena honoring the caller's DMA
 * attributes.  On success *start_vap holds the device-visible address
 * (including the sub-page offset of start_pa) and DDI_DMA_MAPPED is
 * returned; otherwise a DDI DMA error code (via cvt_bind_error()).
 */
int
amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    struct ddi_dma_req *dmareq, uint64_t start_pa, uint64_t pa_sz,
    map_type_t type, uint64_t *start_vap, int km_flags)
{
	pfn_t pfn_start;
	pfn_t pfn_end;
	pfn_t pfn;
	int alias;
	int32_t deviceid;
	domain_id_t domainid;
	amd_iommu_domain_t *dp;
	uint64_t end_pa;
	uint64_t start_va;
	uint64_t end_va;
	uint64_t pg_start;
	uint64_t pg_end;
	uint64_t pg;
	uint64_t va_sz;
	char *path;
	int error = DDI_DMA_NOMAPPING;
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "amd_iommu_map_pa2va";

	ASSERT(pa_sz != 0);

	*start_vap = 0;

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path == NULL) {
		error = DDI_DMA_NORESOURCES;
		goto out;
	}
	(void) ddi_pathname(rdip, path);

	/*
	 * First get deviceid
	 */
	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
		    "Failed to get device ID for %s.", f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, path);
		error = DDI_DMA_NOMAPPING;
		goto out;
	}

	/*
	 * Next get the domain for this rdip
	 */
	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
		    "Failed to get domain.", f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, path);
		error = DDI_DMA_NOMAPPING;
		goto out;
	}

	dp = amd_iommu_lookup_domain(iommu, domainid, type, km_flags);
	if (dp == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
		    "Failed to get device ID for %s.", f, driver, instance,
		    iommu->aiomt_idx, domainid, (void *)rdip, path);
		error = DDI_DMA_NORESOURCES;
		goto out;
	}

	ASSERT(dp->d_domainid == domainid);

	pfn_start = start_pa >> MMU_PAGESHIFT;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "pa = %p, pfn_new = %p, pfn_start = %p, "
		    "pgshift = %d",
		    (void *)(uintptr_t)start_pa,
		    (void *)(uintptr_t)(start_pa >> MMU_PAGESHIFT),
		    (void *)(uintptr_t)pfn_start, MMU_PAGESHIFT);
	}

	end_pa = start_pa + pa_sz - 1;
	pfn_end = end_pa >> MMU_PAGESHIFT;

	if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
		/* identity mapping: device sees physical addresses */
		start_va = start_pa;
		end_va = end_pa;
		va_sz = pa_sz;
		*start_vap = start_va;
	} else {
		/* allocate a VA range satisfying the DMA attributes */
		va_sz = mmu_ptob(pfn_end - pfn_start + 1);
		start_va = (uintptr_t)vmem_xalloc(dp->d_vmem, va_sz,
		    MAX(attrp->dma_attr_align, MMU_PAGESIZE),
		    0,
		    attrp->dma_attr_seg + 1,
		    (void *)(uintptr_t)attrp->dma_attr_addr_lo,
		    (void *)(uintptr_t)MIN((attrp->dma_attr_addr_hi + 1),
		    AMD_IOMMU_SIZE_4G),	/* XXX rollover */
		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (start_va == 0) {
			cmn_err(CE_WARN, "%s: No VA resources",
			    amd_iommu_modname);
			error = DDI_DMA_NORESOURCES;
			goto out;
		}
		ASSERT((start_va & MMU_PAGEOFFSET) == 0);
		end_va = start_va + va_sz - 1;
		/* preserve the sub-page offset in the returned address */
		*start_vap = start_va + (start_pa & MMU_PAGEOFFSET);
	}

	pg_start = start_va >> MMU_PAGESHIFT;
	pg_end = end_va >> MMU_PAGESHIFT;

	/* build page tables one page at a time over the whole range */
	pg = pg_start;
	for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {

		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
			cmn_err(CE_NOTE, "%s: attempting to create page tables "
			    "for pfn = %p, va = %p, path = %s",
			    f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
			    (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);

		}

		if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
			ASSERT(pfn == pg);
		}

		/*
		 * NOTE(review): if this fails partway through, neither
		 * the vmem_xalloc()'d VA span above nor the pagetables
		 * created for earlier pages are released on the error
		 * path -- looks like a resource leak; confirm.
		 */
		if ((error = amd_iommu_create_pgtables(iommu, rdip, dmareq,
		    pg << MMU_PAGESHIFT,
		    pfn << MMU_PAGESHIFT, deviceid, domainid, dp, path,
		    km_flags)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed to create_pgtables");
			goto out;
		}

		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
			cmn_err(CE_NOTE, "%s: successfully created page tables "
			    "for pfn = %p, vapg = %p, path = %s",
			    f, (void *)(uintptr_t)pfn,
			    (void *)(uintptr_t)pg, path);
		}

	}
	ASSERT(pg == pg_end + 1);


	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
		cmn_err(CE_NOTE, "pa=%p, va=%p",
		    (void *)(uintptr_t)start_pa,
		    (void *)(uintptr_t)(*start_vap));
	}
	error = DDI_DMA_MAPPED;

out:
	kmem_free(path, MAXPATHLEN);
	return (cvt_bind_error(error));
}
1593 1593
/*
 * Unmap the virtual (DMA) range [start_va, start_va + va_sz) for rdip:
 * tear down the page tables for each page, release the VA range back
 * to the domain's vmem arena (for vmem-backed maps), and tear down the
 * domain itself if the last mapping was removed.  Returns DDI_SUCCESS
 * or DDI_FAILURE.
 */
int
amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip, uint64_t start_va,
    uint64_t va_sz, map_type_t type)
{
	uint64_t end_va;
	uint64_t pg_start;
	uint64_t pg_end;
	uint64_t pg;
	uint64_t actual_sz;
	char *path;
	int pathfree;		/* nonzero if 'path' must be kmem_free'd */
	int alias;
	int32_t deviceid;
	domain_id_t domainid;
	amd_iommu_domain_t *dp;
	int error;
	int domain_freed;
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "amd_iommu_unmap_va";

	/* tunable: leave all mappings in place */
	if (amd_iommu_no_unmap)
		return (DDI_SUCCESS);

	/* path is only for diagnostics; fall back to a placeholder */
	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		(void) ddi_pathname(rdip, path);
		pathfree = 1;
	} else {
		pathfree = 0;
		path = "<path-mem-alloc-failed>";
	}

	/*
	 * First get deviceid
	 */
	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
		    "Failed to get device ID for %s.", f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, path);
		error = DDI_FAILURE;
		goto out;
	}

	/*
	 * Next get the domain for this rdip
	 */
	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
		    "Failed to get domain.", f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, path);
		error = DDI_FAILURE;
		goto out;
	}

	/* should never result in domain allocation/vmem_create */
	dp = amd_iommu_lookup_domain(iommu, domainid, AMD_IOMMU_INVALID_MAP,
	    KM_NOSLEEP);
	if (dp == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
		    "Failed to get device ID for %s.", f, driver, instance,
		    iommu->aiomt_idx, domainid, (void *)rdip, path);
		error = DDI_FAILURE;
		goto out;
	}

	ASSERT(dp->d_domainid == domainid);

	pg_start = start_va >> MMU_PAGESHIFT;
	end_va = start_va + va_sz - 1;
	pg_end = end_va >> MMU_PAGESHIFT;
	/* whole pages spanned by the range, for the vmem_xfree below */
	actual_sz = (pg_end - pg_start + 1) << MMU_PAGESHIFT;

	/* tear down pagetables page by page */
	domain_freed = 0;
	for (pg = pg_start; pg <= pg_end; pg++) {
		domain_freed = 0;
		if (amd_iommu_destroy_pgtables(iommu, rdip,
		    pg << MMU_PAGESHIFT, deviceid, domainid, dp, type,
		    &domain_freed, path) != DDI_SUCCESS) {
			error = DDI_FAILURE;
			goto out;
		}
		if (domain_freed) {
			/* root table gone: nothing left to tear down */
			ASSERT(pg == pg_end);
			break;
		}
	}

	/*
	 * vmem_xalloc() must be paired with vmem_xfree
	 */
	if (type == AMD_IOMMU_VMEM_MAP && !amd_iommu_unity_map) {
		vmem_xfree(dp->d_vmem,
		    (void *)(uintptr_t)(pg_start << MMU_PAGESHIFT), actual_sz);
	}

	if (domain_freed)
		amd_iommu_teardown_domain(iommu, dp);

	error = DDI_SUCCESS;
out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
	return (error);
}
↓ open down ↓ |
1662 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX