10703 smatch unreachable code checking needs reworking
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
--- old/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
+++ new/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
25 + * Copyright 2019 Joyent, Inc.
25 26 */
26 27
27 28 #include <sys/sunddi.h>
28 29 #include <sys/sunndi.h>
29 30 #include <sys/iommulib.h>
30 31 #include <sys/amd_iommu.h>
31 32 #include <sys/pci_cap.h>
32 33 #include <sys/bootconf.h>
33 34 #include <sys/ddidmareq.h>
34 35
35 36 #include "amd_iommu_impl.h"
36 37 #include "amd_iommu_acpi.h"
37 38 #include "amd_iommu_page_tables.h"
38 39
39 40 static int amd_iommu_fini(amd_iommu_t *iommu, int type);
40 41 static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
41 42 static void amd_iommu_stop(amd_iommu_t *iommu);
42 43
43 44 static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
44 45 static int amd_iommu_allochdl(iommulib_handle_t handle,
45 46 dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
46 47 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
47 48 static int amd_iommu_freehdl(iommulib_handle_t handle,
48 49 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
49 50 static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
50 51 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
51 52 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
52 53 uint_t *ccountp);
53 54 static int amd_iommu_unbindhdl(iommulib_handle_t handle,
54 55 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
55 56 static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
56 57 dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
57 58 size_t len, uint_t cache_flags);
58 59 static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
59 60 dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
60 61 off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
61 62 uint_t *ccountp);
62 63 static int amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
63 64 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
64 65 struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao);
65 66 static int amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
66 67 dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao);
67 68
68 69 static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
69 70 ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
70 71
71 72 extern void *device_arena_alloc(size_t size, int vm_flag);
72 73 extern void device_arena_free(void * vaddr, size_t size);
73 74
74 75 ddi_dma_attr_t amd_iommu_dma_attr = {
75 76 DMA_ATTR_V0,
76 77 0U, /* dma_attr_addr_lo */
77 78 0xffffffffffffffffULL, /* dma_attr_addr_hi */
78 79 0xffffffffU, /* dma_attr_count_max */
79 80 (uint64_t)4096, /* dma_attr_align */
80 81 1, /* dma_attr_burstsizes */
81 82 64, /* dma_attr_minxfer */
82 83 0xffffffffU, /* dma_attr_maxxfer */
83 84 0xffffffffU, /* dma_attr_seg */
84 85 1, /* dma_attr_sgllen, variable */
85 86 64, /* dma_attr_granular */
86 87 0 /* dma_attr_flags */
87 88 };
88 89
89 90 ddi_device_acc_attr_t amd_iommu_devacc = {
90 91 DDI_DEVICE_ATTR_V0,
91 92 DDI_NEVERSWAP_ACC,
92 93 DDI_STRICTORDER_ACC
93 94 };
94 95
95 96 struct iommulib_ops amd_iommulib_ops = {
96 97 IOMMU_OPS_VERSION,
97 98 AMD_IOMMU,
98 99 "AMD IOMMU Vers. 1",
99 100 NULL,
100 101 amd_iommu_probe,
101 102 amd_iommu_allochdl,
102 103 amd_iommu_freehdl,
103 104 amd_iommu_bindhdl,
104 105 amd_iommu_unbindhdl,
105 106 amd_iommu_sync,
106 107 amd_iommu_win,
107 108 amd_iommu_mapobject,
108 109 amd_iommu_unmapobject,
109 110 };
110 111
111 112 static kmutex_t amd_iommu_pgtable_lock;
112 113
113 114 static int
114 115 amd_iommu_register(amd_iommu_t *iommu)
115 116 {
116 117 dev_info_t *dip = iommu->aiomt_dip;
117 118 const char *driver = ddi_driver_name(dip);
118 119 int instance = ddi_get_instance(dip);
119 120 iommulib_ops_t *iommulib_ops;
120 121 iommulib_handle_t handle;
121 122 const char *f = "amd_iommu_register";
122 123
123 124 iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
124 125
125 126 *iommulib_ops = amd_iommulib_ops;
126 127
127 128 iommulib_ops->ilops_data = (void *)iommu;
128 129 iommu->aiomt_iommulib_ops = iommulib_ops;
129 130
130 131 if (iommulib_iommu_register(dip, iommulib_ops, &handle)
131 132 != DDI_SUCCESS) {
132 133 cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
133 134 "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
134 135 kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
135 136 return (DDI_FAILURE);
136 137 }
137 138
138 139 iommu->aiomt_iommulib_handle = handle;
139 140
140 141 return (DDI_SUCCESS);
141 142 }
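
amd_iommu_register() above clones the shared amd_iommulib_ops template into a per-unit copy so that each IOMMU can carry its own back-pointer in ilops_data. A minimal userland sketch of that clone-and-tag step, using a hypothetical stand-in type rather than the real iommulib_ops_t:

#include <stdlib.h>

struct ops {
	void *ilops_data;		/* per-unit back-pointer */
};

static const struct ops ops_template = { NULL };

static struct ops *
clone_ops(void *unit)
{
	struct ops *op = calloc(1, sizeof (*op));

	if (op == NULL)
		return (NULL);
	*op = ops_template;	/* struct copy, like *iommulib_ops = ... */
	op->ilops_data = unit;	/* tag the copy with this unit */
	return (op);
}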
142 143
143 144 static int
144 145 amd_iommu_unregister(amd_iommu_t *iommu)
145 146 {
146 147 if (iommu->aiomt_iommulib_handle == NULL) {
147 148 /* we never registered */
148 149 return (DDI_SUCCESS);
149 150 }
150 151
151 152 if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
152 153 != DDI_SUCCESS) {
153 154 return (DDI_FAILURE);
154 155 }
155 156
156 157 kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
157 158 iommu->aiomt_iommulib_ops = NULL;
158 159 iommu->aiomt_iommulib_handle = NULL;
159 160
160 161 return (DDI_SUCCESS);
161 162 }
162 163
163 164 static int
164 165 amd_iommu_setup_passthru(amd_iommu_t *iommu)
165 166 {
166 167 gfx_entry_t *gfxp;
167 168 dev_info_t *dip;
168 169
169 170 /*
170 171 * Setup passthru mapping for "special" devices
171 172 */
172 173 amd_iommu_set_passthru(iommu, NULL);
173 174
174 175 for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
175 176 gfxp->g_ref++;
176 177 dip = gfxp->g_dip;
177 178 if (dip) {
178 179 amd_iommu_set_passthru(iommu, dip);
179 180 }
180 181 gfxp->g_ref--;
181 182 }
182 183
183 184 return (DDI_SUCCESS);
184 185 }
185 186
186 187 static int
187 188 amd_iommu_start(amd_iommu_t *iommu)
188 189 {
189 190 dev_info_t *dip = iommu->aiomt_dip;
190 191 int instance = ddi_get_instance(dip);
191 192 const char *driver = ddi_driver_name(dip);
192 193 amd_iommu_acpi_ivhd_t *hinfop;
193 194 const char *f = "amd_iommu_start";
194 195
195 196 hinfop = amd_iommu_lookup_all_ivhd();
196 197
197 198 /*
198 199 * Disable HT tunnel translation.
199 200 * XXX use ACPI
200 201 */
201 202 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
202 203 AMD_IOMMU_HT_TUN_ENABLE, 0);
203 204
204 205 if (hinfop) {
205 206 if (amd_iommu_debug) {
206 207 cmn_err(CE_NOTE,
207 208 "amd_iommu: using ACPI for CTRL registers");
208 209 }
209 210 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
210 211 AMD_IOMMU_ISOC, hinfop->ach_Isoc);
211 212 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
212 213 AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
213 214 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
214 215 AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
215 216 }
216 217
217 218 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
218 219 AMD_IOMMU_INVTO, 5);
219 220
220 221
221 222 /*
222 223 * The Device table entry bit 0 (V) controls whether the device
223 224 * table entry is valid for address translation and Device table
224 225 * entry bit 128 (IV) controls whether interrupt remapping is valid.
225 226 * By setting both to zero we are essentially doing pass-thru. Since
226 227 * this table is zeroed on allocation, essentially we will have
227 228 * pass-thru when IOMMU is enabled.
228 229 */
229 230
230 231 /* Finally enable the IOMMU ... */
231 232 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
232 233 AMD_IOMMU_ENABLE, 1);
233 234
234 235 if (amd_iommu_debug) {
235 236 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
236 237 "Successfully started AMD IOMMU", f, driver, instance,
237 238 iommu->aiomt_idx);
238 239 }
239 240 cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
240 241 instance, iommu->aiomt_idx);
241 242
242 243 return (DDI_SUCCESS);
243 244 }
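
amd_iommu_start() programs individual bit fields of the 64-bit control register via AMD_IOMMU_REG_SET64(). A hedged sketch of the read-modify-write such a macro presumably performs; the shift/mask parameters here are illustrative assumptions, and the real field encodings live in amd_iommu_impl.h:

#include <stdint.h>

static inline void
reg_set64(volatile uint64_t *regp, uint64_t mask, unsigned int shift,
    uint64_t val)
{
	uint64_t r = *regp;		/* read current contents */

	r &= ~(mask << shift);		/* clear the target field */
	r |= (val & mask) << shift;	/* merge in the new value */
	*regp = r;			/* write back */
}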
244 245
245 246 static void
246 247 amd_iommu_stop(amd_iommu_t *iommu)
247 248 {
248 249 dev_info_t *dip = iommu->aiomt_dip;
249 250 int instance = ddi_get_instance(dip);
250 251 const char *driver = ddi_driver_name(dip);
251 252 const char *f = "amd_iommu_stop";
252 253
253 254 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
254 255 AMD_IOMMU_ENABLE, 0);
255 256 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
256 257 AMD_IOMMU_EVENTINT_ENABLE, 0);
257 258 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
258 259 AMD_IOMMU_COMWAITINT_ENABLE, 0);
259 260 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
260 261 AMD_IOMMU_EVENTLOG_ENABLE, 0);
261 262
262 263 /*
263 264 * Disable translation on HT tunnel traffic
264 265 */
265 266 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
266 267 AMD_IOMMU_HT_TUN_ENABLE, 0);
267 268 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
268 269 AMD_IOMMU_CMDBUF_ENABLE, 0);
269 270
270 271  	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
271 272 "Successfully stopped AMD IOMMU", f, driver, instance,
272 273 iommu->aiomt_idx);
273 274 }
274 275
275 276 static int
276 277 amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
277 278 {
278 279 dev_info_t *dip = iommu->aiomt_dip;
279 280 int instance = ddi_get_instance(dip);
280 281 const char *driver = ddi_driver_name(dip);
281 282 uint32_t dma_bufsz;
282 283 caddr_t addr;
283 284 uint32_t sz;
284 285 uint32_t p2sz;
285 286 int i;
286 287 uint64_t *dentry;
287 288 int err;
288 289 const char *f = "amd_iommu_setup_tables_and_buffers";
289 290
290 291 /*
291 292 * We will put the Device Table, Command Buffer and
292 293 * Event Log in contiguous memory. Allocate the maximum
293 294 * size allowed for such structures
294 295 * Device Table: 256b * 64K = 32B * 64K
295 296 * Command Buffer: 128b * 32K = 16B * 32K
296 297 * Event Log: 128b * 32K = 16B * 32K
297 298 */
298 299 iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
299 300 iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
300 301 iommu->aiomt_eventlog_sz =
301 302 (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
302 303
303 304 dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
304 305 + iommu->aiomt_eventlog_sz;
305 306
306 307 /*
307 308 * Alloc a DMA handle.
308 309 */
309 310 err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
310 311 DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
311 312 if (err != DDI_SUCCESS) {
312 313 cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
313 314 "AMD IOMMU tables and buffers", f, driver, instance);
314 315 return (DDI_FAILURE);
315 316 }
316 317
317 318 /*
318 319 * Alloc memory for tables and buffers
319 320 * XXX remove cast to size_t
320 321 */
321 322 err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
322 323 &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
323 324 DDI_DMA_SLEEP, NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
324 325 (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
325 326 if (err != DDI_SUCCESS) {
326 327 cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
327 328 "to AMD IOMMU tables and buffers", f, driver, instance);
328 329 iommu->aiomt_dma_bufva = NULL;
329 330 iommu->aiomt_dma_mem_realsz = 0;
330 331 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
331 332 iommu->aiomt_dmahdl = NULL;
332 333 return (DDI_FAILURE);
333 334 }
334 335
335 336 /*
336 337 * The VA must be 4K aligned and >= table size
337 338 */
338 339 ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
339 340 AMD_IOMMU_TABLE_ALIGN) == 0);
340 341 ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
341 342
342 343 /*
343 344 * Now bind the handle
344 345 */
345 346 err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
346 347 iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
347 348 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
348 349 NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
349 350 if (err != DDI_DMA_MAPPED) {
350 351 cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
351 352 "to AMD IOMMU tables and buffers. bufrealsz=%p",
352 353 f, driver, instance,
353 354 (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
354 355 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
355 356 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
356 357 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
357 358 iommu->aiomt_buf_dma_ncookie = 0;
358 359 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
359 360 iommu->aiomt_dma_mem_hdl = NULL;
360 361 iommu->aiomt_dma_bufva = NULL;
361 362 iommu->aiomt_dma_mem_realsz = 0;
362 363 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
363 364 iommu->aiomt_dmahdl = NULL;
364 365 return (DDI_FAILURE);
365 366 }
366 367
367 368 /*
368 369 * We assume the DMA engine on the IOMMU is capable of handling the
369 370 * whole table buffer in a single cookie. If not and multiple cookies
370 371 * are needed we fail.
371 372 */
372 373 if (iommu->aiomt_buf_dma_ncookie != 1) {
373 374 cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
374 375 "cookies for DMA to AMD IOMMU tables and buffers. "
375 376 "#cookies=%u", f, driver, instance,
376 377 iommu->aiomt_buf_dma_ncookie);
377 378 (void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
378 379 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
379 380 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
380 381 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
381 382 iommu->aiomt_buf_dma_ncookie = 0;
382 383 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
383 384 iommu->aiomt_dma_mem_hdl = NULL;
384 385 iommu->aiomt_dma_bufva = NULL;
385 386 iommu->aiomt_dma_mem_realsz = 0;
386 387 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
387 388 iommu->aiomt_dmahdl = NULL;
388 389 return (DDI_FAILURE);
389 390 }
390 391
391 392 /*
392 393 * The address in the cookie must be 4K aligned and >= table size
393 394 */
394 395 ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
395 396 & AMD_IOMMU_TABLE_ALIGN) == 0);
396 397 ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
397 398 <= iommu->aiomt_dma_mem_realsz);
398 399 ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
399 400
400 401 /*
401 402 * Setup the device table pointers in the iommu struct as
402 403 * well as the IOMMU device table register
403 404 */
404 405 iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
405 406 bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
406 407
407 408 /*
408 409  	 * Set V=1 and TV = 0, so any inadvertent pass-thrus cause
409 410 * page faults. Also set SE bit so we aren't swamped with
410 411 * page fault messages
411 412 */
412 413 for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
413 414 /*LINTED*/
414 415 dentry = (uint64_t *)&iommu->aiomt_devtbl
415 416 [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
416 417 AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
417 418 AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
418 419 }
419 420
420 421 addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
421 422 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
422 423 AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
423 424 sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
424 425 ASSERT(sz <= ((1 << 9) - 1));
425 426 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
426 427 AMD_IOMMU_DEVTABSIZE, sz);
427 428
428 429 /*
429 430 * Setup the command buffer pointers
430 431 */
431 432 iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
432 433 iommu->aiomt_devtbl_sz;
433 434 bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
434 435 addr += iommu->aiomt_devtbl_sz;
435 436 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
436 437 AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
437 438
438 439 p2sz = AMD_IOMMU_CMDBUF_SZ;
439 440 ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
440 441 p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
441 442 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
442 443 AMD_IOMMU_COMLEN, p2sz);
443 444 /*LINTED*/
444 445 iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
445 446 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
446 447 AMD_IOMMU_CMDHEADPTR, 0);
447 448 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
448 449 AMD_IOMMU_CMDTAILPTR, 0);
449 450
450 451 /*
451 452 * Setup the event log pointers
452 453 */
453 454 iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
454 455  	    iommu->aiomt_cmdbuf_sz;
455 456 bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
456 457 addr += iommu->aiomt_cmdbuf_sz;
457 458 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
458 459 AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
459 460 p2sz = AMD_IOMMU_EVENTLOG_SZ;
460 461 ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
461 462 p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
462 463 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
463 464  	    AMD_IOMMU_EVENTLEN, p2sz);
464 465 /*LINTED*/
465 466 iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
466 467 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
467 468 AMD_IOMMU_EVENTHEADPTR, 0);
468 469 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
469 470 AMD_IOMMU_EVENTTAILPTR, 0);
470 471
471 472 /* dma sync so device sees this init */
472 473 SYNC_FORDEV(iommu->aiomt_dmahdl);
473 474
474 475 if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
475 476 cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
476 477 "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
477 478 }
478 479
479 480 return (DDI_SUCCESS);
480 481 }
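
The sizing comment at the top of amd_iommu_setup_tables_and_buffers() fixes the layout of the shared allocation: 256-bit device table entries for 64K device IDs, followed by 128-bit command and event entries for 32K slots each. The arithmetic as a standalone check, assuming those maxima rather than the driver's AMD_IOMMU_*_SZ constants:

#include <stdio.h>

int
main(void)
{
	size_t devtbl = 65536u * 32u;	/* 64K x 32 B = 2 MiB device table */
	size_t cmdbuf = 32768u * 16u;	/* 32K x 16 B = 512 KiB cmd buffer */
	size_t evtlog = 32768u * 16u;	/* 32K x 16 B = 512 KiB event log */

	/* One contiguous DMA allocation covers all three: 3 MiB. */
	printf("dma_bufsz = %zu bytes\n", devtbl + cmdbuf + evtlog);
	return (0);
}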
481 482
482 483 static void
483 484 amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
484 485 {
485 486 dev_info_t *dip = iommu->aiomt_dip;
486 487 int instance = ddi_get_instance(dip);
487 488 const char *driver = ddi_driver_name(dip);
488 489 const char *f = "amd_iommu_teardown_tables_and_buffers";
489 490
490 491 iommu->aiomt_eventlog = NULL;
491 492 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
492 493 AMD_IOMMU_EVENTBASE, 0);
493 494 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
494 495 AMD_IOMMU_EVENTLEN, 0);
495 496 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
496 497 AMD_IOMMU_EVENTHEADPTR, 0);
497 498 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
498 499 AMD_IOMMU_EVENTTAILPTR, 0);
499 500
500 501
501 502 iommu->aiomt_cmdbuf = NULL;
502 503 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
503 504 AMD_IOMMU_COMBASE, 0);
504 505 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
505 506 AMD_IOMMU_COMLEN, 0);
506 507 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
507 508 AMD_IOMMU_CMDHEADPTR, 0);
508 509 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
509 510 AMD_IOMMU_CMDTAILPTR, 0);
510 511
511 512
512 513 iommu->aiomt_devtbl = NULL;
513 514 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
514 515 AMD_IOMMU_DEVTABBASE, 0);
515 516 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
516 517 AMD_IOMMU_DEVTABSIZE, 0);
517 518
518 519 if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
519 520 return;
520 521
521 522 /* Unbind the handle */
522 523 if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
523 524 cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
524 525 "%p for IOMMU idx=%d", f, driver, instance,
525 526 (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
526 527 }
527 528 iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
528 529 iommu->aiomt_buf_dma_cookie.dmac_size = 0;
529 530 iommu->aiomt_buf_dma_cookie.dmac_type = 0;
530 531 iommu->aiomt_buf_dma_ncookie = 0;
531 532
532 533 /* Free the table memory allocated for DMA */
533 534 ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
534 535 iommu->aiomt_dma_mem_hdl = NULL;
535 536 iommu->aiomt_dma_bufva = NULL;
536 537 iommu->aiomt_dma_mem_realsz = 0;
537 538
538 539 /* Free the DMA handle */
539 540 ddi_dma_free_handle(&iommu->aiomt_dmahdl);
540 541 iommu->aiomt_dmahdl = NULL;
541 542 }
542 543
543 544 static void
544 545 amd_iommu_enable_interrupts(amd_iommu_t *iommu)
545 546 {
546 547 ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
547 548 AMD_IOMMU_CMDBUF_RUN) == 0);
548 549 ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
549 550 AMD_IOMMU_EVENT_LOG_RUN) == 0);
550 551
551 552 /* Must be set prior to enabling command buffer */
552 553 /* Must be set prior to enabling event logging */
553 554 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
554 555 AMD_IOMMU_CMDBUF_ENABLE, 1);
555 556  	/* No interrupts for completion wait - too heavyweight; use polling */
556 557 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
557 558 AMD_IOMMU_COMWAITINT_ENABLE, 0);
558 559 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
559 560 AMD_IOMMU_EVENTLOG_ENABLE, 1);
560 561 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
561 562 AMD_IOMMU_EVENTINT_ENABLE, 1);
562 563 }
563 564
564 565 static int
565 566 amd_iommu_setup_exclusion(amd_iommu_t *iommu)
566 567 {
567 568 amd_iommu_acpi_ivmd_t *minfop;
568 569
569 570 minfop = amd_iommu_lookup_all_ivmd();
570 571
571 572 if (minfop && minfop->acm_ExclRange == 1) {
572 573 cmn_err(CE_NOTE, "Programming exclusion range");
573 574 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
574 575 AMD_IOMMU_EXCL_BASE_ADDR,
575 576 minfop->acm_ivmd_phys_start >> 12);
576 577 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
577 578 AMD_IOMMU_EXCL_BASE_ALLOW, 1);
578 579 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
579 580 AMD_IOMMU_EXCL_BASE_EXEN, 1);
580 581 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
581 582 AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
582 583 minfop->acm_ivmd_phys_len) >> 12);
583 584 } else {
584 585 if (amd_iommu_debug) {
585 586 cmn_err(CE_NOTE, "Skipping exclusion range");
586 587 }
587 588 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
588 589 AMD_IOMMU_EXCL_BASE_ADDR, 0);
589 590 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
590 591 AMD_IOMMU_EXCL_BASE_ALLOW, 1);
591 592 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
592 593 AMD_IOMMU_EXCL_BASE_EXEN, 0);
593 594 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
594 595 AMD_IOMMU_EXCL_LIM, 0);
595 596 }
596 597
597 598 return (DDI_SUCCESS);
598 599 }
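
The exclusion base and limit registers take 4 KiB page-frame numbers, hence the >> 12 applied to the IVMD start and end above. A worked example with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t start = 0x80000000ULL;	/* hypothetical IVMD start: 2 GiB */
	uint64_t len = 0x00100000ULL;	/* hypothetical length: 1 MiB */

	printf("EXCL_BASE_ADDR = 0x%llx\n",	/* prints 0x80000 */
	    (unsigned long long)(start >> 12));
	printf("EXCL_LIM = 0x%llx\n",		/* prints 0x80100 */
	    (unsigned long long)((start + len) >> 12));
	return (0);
}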
599 600
600 601 static void
601 602 amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
602 603 {
603 604 (void) amd_iommu_setup_exclusion(iommu);
604 605 }
605 606
606 607 static uint_t
607 608 amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
608 609 {
609 610 /*LINTED*/
610 611 amd_iommu_t *iommu = (amd_iommu_t *)arg1;
611 612 dev_info_t *dip = iommu->aiomt_dip;
612 613 int instance = ddi_get_instance(dip);
613 614 const char *driver = ddi_driver_name(dip);
614 615 const char *f = "amd_iommu_intr_handler";
615 616
616 617 ASSERT(arg1);
617 618 ASSERT(arg2 == NULL);
618 619
619 620 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
620 621 cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
621 622 f, driver, instance, iommu->aiomt_idx);
622 623 }
623 624
624 625 if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
625 626 AMD_IOMMU_EVENT_LOG_INT) == 1) {
626 627 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
627 628 cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
628 629 "Event Log Interrupt", f, driver, instance,
629 630 iommu->aiomt_idx);
630 631 }
631 632 (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
632 633 WAIT_SEC(1);
633 634 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
634 635 AMD_IOMMU_EVENT_LOG_INT, 1);
635 636 return (DDI_INTR_CLAIMED);
636 637 }
637 638
638 639 if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
639 640 AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
640 641 cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
641 642 "Event Overflow Interrupt", f, driver, instance,
642 643 iommu->aiomt_idx);
643 644 (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
644 645 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
645 646 AMD_IOMMU_EVENT_LOG_INT, 1);
646 647 AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
647 648 AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
648 649 return (DDI_INTR_CLAIMED);
649 650 }
650 651
651 652 return (DDI_INTR_UNCLAIMED);
652 653 }
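
The handler above acknowledges an event-log interrupt by writing 1 back to the very status bit it just found set, which only works if the status bits are write-one-to-clear. A sketch of that convention, with an assumed bit position:

#include <stdint.h>

#define	EVENT_LOG_INT	(1ULL << 1)	/* hypothetical bit position */

static void
ack_event_log_int(volatile uint64_t *statusp)
{
	/* W1C: the bit written as 1 is cleared; 0 bits are untouched. */
	*statusp = EVENT_LOG_INT;
}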
653 654
654 655
655 656 static int
656 657 amd_iommu_setup_interrupts(amd_iommu_t *iommu)
657 658 {
658 659 dev_info_t *dip = iommu->aiomt_dip;
659 660 int instance = ddi_get_instance(dip);
660 661 const char *driver = ddi_driver_name(dip);
661 662 int intrcap0;
662 663 int intrcapN;
663 664 int type;
664 665 int err;
665 666 int req;
666 667 int avail;
667 668 int p2req;
668 669 int actual;
669 670 int i;
670 671 int j;
671 672 const char *f = "amd_iommu_setup_interrupts";
672 673
673 674 if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
674 675 cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
675 676 "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
676 677 return (DDI_FAILURE);
677 678 }
678 679
679 680 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
680 681 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
681 682 "Interrupt types supported = 0x%x", f, driver, instance,
682 683 iommu->aiomt_idx, type);
683 684 }
684 685
685 686 /*
686 687 * for now we only support MSI
687 688 */
688 689 if ((type & DDI_INTR_TYPE_MSI) == 0) {
689 690 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
690 691 "MSI interrupts not supported. Failing init.",
691 692 f, driver, instance, iommu->aiomt_idx);
692 693 return (DDI_FAILURE);
693 694 }
694 695
695 696 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
696 697 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
697 698 f, driver, instance, iommu->aiomt_idx);
698 699 }
699 700
700 701 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
701 702 if (err != DDI_SUCCESS) {
702 703 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
703 704 "ddi_intr_get_nintrs failed err = %d",
704 705 f, driver, instance, iommu->aiomt_idx, err);
705 706 return (DDI_FAILURE);
706 707 }
707 708
708 709 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
709 710 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
710 711 "MSI number of interrupts requested: %d",
711 712 f, driver, instance, iommu->aiomt_idx, req);
712 713 }
713 714
714 715 if (req == 0) {
715 716 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
716 717 "interrupts requested. Failing init", f,
717 718 driver, instance, iommu->aiomt_idx);
718 719 return (DDI_FAILURE);
719 720 }
720 721
721 722 err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
722 723 if (err != DDI_SUCCESS) {
723 724 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
724 725 "ddi_intr_get_navail failed err = %d", f,
725 726 driver, instance, iommu->aiomt_idx, err);
726 727 return (DDI_FAILURE);
727 728 }
728 729
729 730 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
730 731 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
731 732 "MSI number of interrupts available: %d",
732 733 f, driver, instance, iommu->aiomt_idx, avail);
733 734 }
734 735
735 736 if (avail == 0) {
736 737 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
737 738 "interrupts available. Failing init", f,
738 739 driver, instance, iommu->aiomt_idx);
739 740 return (DDI_FAILURE);
740 741 }
741 742
742 743 if (avail < req) {
743 744 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
744 745 "interrupts: requested (%d) > available (%d). "
745 746 "Failing init", f, driver, instance, iommu->aiomt_idx,
746 747 req, avail);
747 748 return (DDI_FAILURE);
748 749 }
749 750
750 751 /* Allocate memory for DDI interrupt handles */
751 752 iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
752 753 iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
753 754 KM_SLEEP);
754 755
755 756 iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
756 757
757 758 /* Convert req to a power of two as required by ddi_intr_alloc */
758 759 p2req = 0;
759 760 while (1<<p2req <= req)
760 761 p2req++;
761 762 p2req--;
762 763 req = 1<<p2req;
763 764
764 765 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
765 766 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
766 767 "MSI power of 2 number of interrupts: %d,%d",
767 768 f, driver, instance, iommu->aiomt_idx, p2req, req);
768 769 }
769 770
770 771 err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
771 772 DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
772 773 if (err != DDI_SUCCESS) {
773 774 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
774 775 "ddi_intr_alloc failed: err = %d",
775 776 f, driver, instance, iommu->aiomt_idx, err);
776 777 amd_iommu_teardown_interrupts(iommu);
777 778 return (DDI_FAILURE);
778 779 }
779 780
780 781 iommu->aiomt_actual_intrs = actual;
781 782 iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
782 783
783 784 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
784 785 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
785 786 "number of interrupts actually allocated %d",
786 787 f, driver, instance, iommu->aiomt_idx, actual);
787 788 }
788 789
789 790 if (iommu->aiomt_actual_intrs < req) {
790 791 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
791 792 "ddi_intr_alloc failed: actual (%d) < req (%d)",
792 793 f, driver, instance, iommu->aiomt_idx,
793 794 iommu->aiomt_actual_intrs, req);
794 795 amd_iommu_teardown_interrupts(iommu);
795 796 return (DDI_FAILURE);
796 797 }
797 798
798 799 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
799 800 if (ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
800 801 amd_iommu_intr_handler, (void *)iommu, NULL)
801 802 != DDI_SUCCESS) {
802 803 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
803 804 "ddi_intr_add_handler failed: intr = %d, err = %d",
804 805 f, driver, instance, iommu->aiomt_idx, i, err);
805 806 for (j = 0; j < i; j++) {
806 807 (void) ddi_intr_remove_handler(
807 808 iommu->aiomt_intr_htable[j]);
808 809 }
809 810 amd_iommu_teardown_interrupts(iommu);
810 811 return (DDI_FAILURE);
811 812 }
812 813 }
813 814 iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
814 815
815 816 intrcap0 = intrcapN = -1;
816 817 if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
817 818 != DDI_SUCCESS ||
818 819 ddi_intr_get_cap(
819 820 iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
820 821 != DDI_SUCCESS || intrcap0 != intrcapN) {
821 822 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
822 823 "ddi_intr_get_cap failed or inconsistent cap among "
823 824 "interrupts: intrcap0 (%d) < intrcapN (%d)",
824 825 f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
825 826 amd_iommu_teardown_interrupts(iommu);
826 827 return (DDI_FAILURE);
827 828 }
828 829 iommu->aiomt_intr_cap = intrcap0;
829 830
830 831 if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
831 832 /* Need to call block enable */
832 833 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
833 834 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
834 835 "Need to call block enable",
835 836 f, driver, instance, iommu->aiomt_idx);
836 837 }
837 838 if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
838 839 iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
839 840 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
840 841 "ddi_intr_block enable failed ", f, driver,
841 842 instance, iommu->aiomt_idx);
842 843 (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
843 844 iommu->aiomt_actual_intrs);
844 845 amd_iommu_teardown_interrupts(iommu);
845 846 return (DDI_FAILURE);
846 847 }
847 848 } else {
848 849 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
849 850 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
850 851 "Need to call individual enable",
851 852 f, driver, instance, iommu->aiomt_idx);
852 853 }
853 854 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
854 855 if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
855 856 != DDI_SUCCESS) {
856 857 cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
857 858 "ddi_intr_enable failed: intr = %d", f,
858 859 driver, instance, iommu->aiomt_idx, i);
859 860 for (j = 0; j < i; j++) {
860 861 (void) ddi_intr_disable(
861 862 iommu->aiomt_intr_htable[j]);
862 863 }
863 864 amd_iommu_teardown_interrupts(iommu);
864 865 return (DDI_FAILURE);
865 866 }
866 867 }
867 868 }
868 869 iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
869 870
870 871 if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
871 872 cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
872 873 "Interrupts successfully %s enabled. # of interrupts = %d",
873 874 f, driver, instance, iommu->aiomt_idx,
874 875 (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
875 876 "(individually)", iommu->aiomt_actual_intrs);
876 877 }
877 878
878 879 return (DDI_SUCCESS);
879 880 }
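
The p2req loop in amd_iommu_setup_interrupts() rounds the requested MSI count down to the nearest power of two, as the comment says ddi_intr_alloc() requires. A standalone copy of that loop with sample inputs:

#include <stdio.h>

static int
round_down_pow2(int req)
{
	int p2req = 0;

	while ((1 << p2req) <= req)
		p2req++;
	p2req--;
	return (1 << p2req);		/* largest power of two <= req */
}

int
main(void)
{
	printf("req=1 -> %d\n", round_down_pow2(1));	/* 1 */
	printf("req=3 -> %d\n", round_down_pow2(3));	/* 2 */
	printf("req=4 -> %d\n", round_down_pow2(4));	/* 4 */
	return (0);
}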
880 881
881 882 static void
882 883 amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
883 884 {
884 885 int i;
885 886
886 887 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
887 888 if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
888 889 (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
889 890 iommu->aiomt_actual_intrs);
890 891 } else {
891 892 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
892 893 (void) ddi_intr_disable(
893 894 iommu->aiomt_intr_htable[i]);
894 895 }
895 896 }
896 897 }
897 898
898 899 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
899 900 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
900 901 (void) ddi_intr_remove_handler(
901 902 iommu->aiomt_intr_htable[i]);
902 903 }
903 904 }
904 905
905 906 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
906 907 for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
907 908 (void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
908 909 }
909 910 }
910 911 if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
911 912 kmem_free(iommu->aiomt_intr_htable,
912 913 iommu->aiomt_intr_htable_sz);
913 914 }
914 915 iommu->aiomt_intr_htable = NULL;
915 916 iommu->aiomt_intr_htable_sz = 0;
916 917 iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
917 918 }
918 919
919 920 static amd_iommu_t *
920 921 amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
921 922 uint16_t cap_base)
922 923 {
923 924 amd_iommu_t *iommu;
924 925 int instance = ddi_get_instance(dip);
925 926 const char *driver = ddi_driver_name(dip);
926 927 uint32_t caphdr;
927 928 uint32_t low_addr32;
928 929 uint32_t hi_addr32;
929 930 uint32_t range;
930 931 uint32_t misc;
931 932 uint64_t pgoffset;
932 933 amd_iommu_acpi_global_t *global;
933 934 amd_iommu_acpi_ivhd_t *hinfop;
934 935 int bus, device, func;
935 936 const char *f = "amd_iommu_init";
936 937
937 938 low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
938 939 AMD_IOMMU_CAP_ADDR_LOW_OFF);
939 940 if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
940 941 cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
941 942 "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
942 943 instance, idx);
943 944 return (NULL);
944 945 }
945 946
946 947 iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
947 948 mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
948 949 mutex_enter(&iommu->aiomt_mutex);
949 950
950 951 mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
951 952 mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
952 953
953 954 iommu->aiomt_dip = dip;
954 955 iommu->aiomt_idx = idx;
955 956
956 957 if (acpica_get_bdf(iommu->aiomt_dip, &bus, &device, &func)
957 958 != DDI_SUCCESS) {
958 959  		cmn_err(CE_WARN, "%s: %s%d: Failed to get BDF. "
959 960 "Unable to use IOMMU unit idx=%d - skipping ...",
960 961 f, driver, instance, idx);
961 962 return (NULL);
962 963 }
963 964
964 965 iommu->aiomt_bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
965 966 (uint8_t)func;
966 967
967 968 /*
968 969 * Since everything in the capability block is locked and RO at this
969 970 * point, copy everything into the IOMMU struct
970 971 */
971 972
972 973 /* Get cap header */
973 974 caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
974 975 iommu->aiomt_cap_hdr = caphdr;
975 976 iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
976 977 AMD_IOMMU_CAP_NPCACHE);
977 978 iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
978 979
979 980 global = amd_iommu_lookup_acpi_global();
980 981 hinfop = amd_iommu_lookup_any_ivhd(iommu);
981 982
982 983 if (hinfop)
983 984 iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
984 985 else
985 986 iommu->aiomt_iotlb =
986 987 AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
987 988
988 989 iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
989 990 iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
990 991
991 992 /*
992 993 * Get address of IOMMU control registers
993 994 */
994 995 hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
995 996 AMD_IOMMU_CAP_ADDR_HI_OFF);
996 997 iommu->aiomt_low_addr32 = low_addr32;
997 998 iommu->aiomt_hi_addr32 = hi_addr32;
998 999 low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
999 1000
1000 1001 if (hinfop) {
1001 1002 iommu->aiomt_reg_pa = hinfop->ach_IOMMU_reg_base;
1002 1003 ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
1003 1004 } else {
1004 1005 iommu->aiomt_reg_pa = ((uint64_t)hi_addr32 << 32 | low_addr32);
1005 1006 }
1006 1007
1007 1008 /*
1008 1009 * Get cap range reg
1009 1010 */
1010 1011 range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
1011 1012 iommu->aiomt_range = range;
1012 1013 iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
1013 1014 AMD_IOMMU_RNG_VALID);
1014 1015 if (iommu->aiomt_rng_valid) {
1015 1016 iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
1016 1017 AMD_IOMMU_RNG_BUS);
1017 1018 iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
1018 1019 AMD_IOMMU_FIRST_DEVFN);
1019 1020 iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
1020 1021 AMD_IOMMU_LAST_DEVFN);
1021 1022 } else {
1022 1023 iommu->aiomt_rng_bus = 0;
1023 1024 iommu->aiomt_first_devfn = 0;
1024 1025 iommu->aiomt_last_devfn = 0;
1025 1026 }
1026 1027
1027 1028 if (hinfop)
1028 1029 iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
1029 1030 else
1030 1031 iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
1031 1032 AMD_IOMMU_HT_UNITID);
1032 1033
1033 1034 /*
1034 1035 * Get cap misc reg
1035 1036 */
1036 1037 misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
1037 1038 iommu->aiomt_misc = misc;
1038 1039
1039 1040 if (global) {
1040 1041 iommu->aiomt_htatsresv = global->acg_HtAtsResv;
1041 1042 iommu->aiomt_vasize = global->acg_VAsize;
1042 1043 iommu->aiomt_pasize = global->acg_PAsize;
1043 1044 } else {
1044 1045 iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
1045 1046 AMD_IOMMU_HT_ATSRSV);
1046 1047 iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
1047 1048 AMD_IOMMU_VA_SIZE);
1048 1049 iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
1049 1050 AMD_IOMMU_PA_SIZE);
1050 1051 }
1051 1052
1052 1053 if (hinfop) {
1053 1054 iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
1054 1055 } else {
1055 1056 iommu->aiomt_msinum =
1056 1057 AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
1057 1058 }
1058 1059
1059 1060 /*
1060 1061 * Set up mapping between control registers PA and VA
1061 1062 */
1062 1063 pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
1063 1064 ASSERT(pgoffset == 0);
1064 1065 iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
1065 1066 iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
1066 1067
1067 1068 iommu->aiomt_va = (uintptr_t)device_arena_alloc(
1068 1069 ptob(iommu->aiomt_reg_pages), VM_SLEEP);
1069 1070 if (iommu->aiomt_va == 0) {
1070 1071 cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
1071 1072 "control regs. Skipping IOMMU idx=%d", f, driver,
1072 1073 instance, idx);
1073 1074 mutex_exit(&iommu->aiomt_mutex);
1074 1075 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1075 1076 return (NULL);
1076 1077 }
1077 1078
1078 1079 hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1079 1080 iommu->aiomt_reg_size,
1080 1081 mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
1081 1082 | HAT_STRICTORDER, HAT_LOAD_LOCK);
1082 1083
1083 1084 iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
1084 1085
1085 1086 /*
1086 1087 * Setup the various control register's VA
1087 1088 */
1088 1089 iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
1089 1090 AMD_IOMMU_DEVTBL_REG_OFF;
1090 1091 iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
1091 1092 AMD_IOMMU_CMDBUF_REG_OFF;
1092 1093 iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
1093 1094 AMD_IOMMU_EVENTLOG_REG_OFF;
1094 1095 iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
1095 1096 AMD_IOMMU_CTRL_REG_OFF;
1096 1097 iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
1097 1098 AMD_IOMMU_EXCL_BASE_REG_OFF;
1098 1099 iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
1099 1100 AMD_IOMMU_EXCL_LIM_REG_OFF;
1100 1101 iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
1101 1102 AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
1102 1103 iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
1103 1104 AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
1104 1105 iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
1105 1106 AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
1106 1107 iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
1107 1108 AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
1108 1109 iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
1109 1110 AMD_IOMMU_STATUS_REG_OFF;
1110 1111
1111 1112
1112 1113 /*
1113 1114 * Setup the DEVICE table, CMD buffer, and LOG buffer in
1114 1115 * memory and setup DMA access to this memory location
1115 1116 */
1116 1117 if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
1117 1118 mutex_exit(&iommu->aiomt_mutex);
1118 1119 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1119 1120 return (NULL);
1120 1121 }
1121 1122
1122 1123 if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
1123 1124 mutex_exit(&iommu->aiomt_mutex);
1124 1125 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1125 1126 return (NULL);
1126 1127 }
1127 1128
1128 1129 amd_iommu_enable_interrupts(iommu);
1129 1130
1130 1131 if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
1131 1132 mutex_exit(&iommu->aiomt_mutex);
1132 1133 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1133 1134 return (NULL);
1134 1135 }
1135 1136
1136 1137 /*
1137 1138 * need to setup domain table before gfx bypass
1138 1139 */
1139 1140 amd_iommu_init_page_tables(iommu);
1140 1141
1141 1142 /*
1142 1143 * Set pass-thru for special devices like IOAPIC and HPET
1143 1144 *
1144 1145 * Also, gfx devices don't use DDI for DMA. No need to register
1145 1146 * before setting up gfx passthru
1146 1147 */
1147 1148 if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
1148 1149 mutex_exit(&iommu->aiomt_mutex);
1149 1150 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1150 1151 return (NULL);
1151 1152 }
1152 1153
1153 1154 /* Initialize device table entries based on ACPI settings */
1154 1155 if (amd_iommu_acpi_init_devtbl(iommu) != DDI_SUCCESS) {
1155 1156 cmn_err(CE_WARN, "%s: %s%d: Can't initialize device table",
1156 1157 f, driver, instance);
1157 1158 mutex_exit(&iommu->aiomt_mutex);
1158 1159 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1159 1160 return (NULL);
1160 1161 }
1161 1162
1162 1163 if (amd_iommu_start(iommu) != DDI_SUCCESS) {
1163 1164 mutex_exit(&iommu->aiomt_mutex);
1164 1165 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1165 1166 return (NULL);
1166 1167 }
1167 1168
1168 1169 /* xxx register/start race */
1169 1170 if (amd_iommu_register(iommu) != DDI_SUCCESS) {
1170 1171 mutex_exit(&iommu->aiomt_mutex);
1171 1172 (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1172 1173 return (NULL);
1173 1174 }
1174 1175
1175 1176 if (amd_iommu_debug) {
1176 1177 cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
1177 1178 instance, idx);
1178 1179 }
1179 1180
1180 1181 return (iommu);
1181 1182 }
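
amd_iommu_init() packs the unit's bus/device/function into a 16-bit BDF: bus in bits 15:8, device in bits 7:3, function in bits 2:0. A worked example with made-up values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int bus = 0x01, device = 0x02, func = 0x3;	/* hypothetical */
	uint16_t bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
	    (uint8_t)func;

	printf("bdf = 0x%04x\n", bdf);	/* prints bdf = 0x0113 */
	return (0);
}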
1182 1183
1183 1184 static int
1184 1185 amd_iommu_fini(amd_iommu_t *iommu, int type)
1185 1186 {
1186 1187 int idx = iommu->aiomt_idx;
1187 1188 dev_info_t *dip = iommu->aiomt_dip;
1188 1189 int instance = ddi_get_instance(dip);
1189 1190 const char *driver = ddi_driver_name(dip);
1190 1191 const char *f = "amd_iommu_fini";
1191 1192
1192 1193 if (type == AMD_IOMMU_TEARDOWN) {
1193 1194 mutex_enter(&iommu->aiomt_mutex);
1194 1195 if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
1195 1196 cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
1196 1197 "idx = %d", f, driver, instance, idx);
1197 1198 return (DDI_FAILURE);
1198 1199 }
1199 1200 }
1200 1201
1201 1202 amd_iommu_stop(iommu);
1202 1203
1203 1204 if (type == AMD_IOMMU_TEARDOWN) {
1204 1205 amd_iommu_fini_page_tables(iommu);
1205 1206 amd_iommu_teardown_interrupts(iommu);
1206 1207 amd_iommu_teardown_exclusion(iommu);
1207 1208 }
1208 1209
1209 1210 amd_iommu_teardown_tables_and_buffers(iommu, type);
1210 1211
1211 1212 if (type == AMD_IOMMU_QUIESCE)
1212 1213 return (DDI_SUCCESS);
1213 1214
1214 1215 if (iommu->aiomt_va != 0) {
1215 1216 hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1216 1217 iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
1217 1218 device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
1218 1219 ptob(iommu->aiomt_reg_pages));
1219 1220 iommu->aiomt_va = 0;
1220 1221 iommu->aiomt_reg_va = 0;
1221 1222 }
1222 1223 mutex_destroy(&iommu->aiomt_eventlock);
1223 1224 mutex_destroy(&iommu->aiomt_cmdlock);
1224 1225 mutex_exit(&iommu->aiomt_mutex);
1225 1226 mutex_destroy(&iommu->aiomt_mutex);
1226 1227 kmem_free(iommu, sizeof (amd_iommu_t));
1227 1228
1228 1229 cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
1229 1230 f, driver, instance, idx);
1230 1231
1231 1232 return (DDI_SUCCESS);
1232 1233 }
1233 1234
1234 1235 int
1235 1236 amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
1236 1237 {
1237 1238 int instance = ddi_get_instance(dip);
1238 1239 const char *driver = ddi_driver_name(dip);
1239 1240 ddi_acc_handle_t handle;
1240 1241 uint8_t base_class;
1241 1242 uint8_t sub_class;
1242 1243 uint8_t prog_class;
1243 1244 int idx;
1244 1245 uint32_t id;
1245 1246 uint16_t cap_base;
1246 1247 uint32_t caphdr;
1247 1248 uint8_t cap_type;
1248 1249 uint8_t cap_id;
1249 1250 amd_iommu_t *iommu;
1250 1251 const char *f = "amd_iommu_setup";
1251 1252
1252 1253 ASSERT(instance >= 0);
1253 1254 ASSERT(driver);
1254 1255
1255 1256 /* First setup PCI access to config space */
1256 1257
1257 1258 if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
1258 1259 cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
1259 1260 f, driver, instance);
1260 1261 return (DDI_FAILURE);
1261 1262 }
1262 1263
1263 1264 /*
1264 1265 * The AMD IOMMU is part of an independent PCI function. There may be
1265 1266 * more than one IOMMU in that PCI function
1266 1267 */
1267 1268 base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
1268 1269 sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
1269 1270 prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
1270 1271
1271 1272 if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
1272 1273 prog_class != AMD_IOMMU_PCI_PROG_IF) {
1273 1274 cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
1274 1275 "subclass(0x%x)/programming interface(0x%x)", f, driver,
1275 1276 instance, base_class, sub_class, prog_class);
1276 1277 pci_config_teardown(&handle);
1277 1278 return (DDI_FAILURE);
1278 1279 }
1279 1280
1280 1281 /*
1281 1282 * Find and initialize all IOMMU units in this function
1282 1283 */
1283 1284 for (idx = 0; ; idx++) {
1284 1285 if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
1285 1286 break;
1286 1287
1287 1288 /* check if cap ID is secure device cap id */
1288 1289 if (id != PCI_CAP_ID_SECURE_DEV) {
1289 1290 if (amd_iommu_debug) {
1290 1291 cmn_err(CE_NOTE,
1291 1292 "%s: %s%d: skipping IOMMU: idx(0x%x) "
1292 1293 "cap ID (0x%x) != secure dev capid (0x%x)",
1293 1294 f, driver, instance, idx, id,
1294 1295 PCI_CAP_ID_SECURE_DEV);
1295 1296 }
1296 1297 continue;
1297 1298 }
1298 1299
1299 1300 /* check if cap type is IOMMU cap type */
1300 1301 caphdr = PCI_CAP_GET32(handle, 0, cap_base,
1301 1302 AMD_IOMMU_CAP_HDR_OFF);
1302 1303 cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
1303 1304 cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
1304 1305
1305 1306 if (cap_type != AMD_IOMMU_CAP) {
1306 1307 cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1307 1308 "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
1308 1309 driver, instance, idx, cap_type, AMD_IOMMU_CAP);
1309 1310 continue;
1310 1311 }
1311 1312 ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
1312 1313 ASSERT(cap_id == id);
1313 1314
1314 1315 iommu = amd_iommu_init(dip, handle, idx, cap_base);
1315 1316 if (iommu == NULL) {
1316 1317 cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1317 1318 "failed to init IOMMU", f,
1318 1319 driver, instance, idx);
1319 1320 continue;
1320 1321 }
1321 1322
1322 1323 if (statep->aioms_iommu_start == NULL) {
1323 1324 statep->aioms_iommu_start = iommu;
1324 1325 } else {
1325 1326 statep->aioms_iommu_end->aiomt_next = iommu;
1326 1327 }
1327 1328 statep->aioms_iommu_end = iommu;
1328 1329
1329 1330 statep->aioms_nunits++;
1330 1331 }
1331 1332
1332 1333 pci_config_teardown(&handle);
1333 1334
1334 1335 if (amd_iommu_debug) {
1335 1336 cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
1336 1337 f, driver, instance, (void *)statep, statep->aioms_nunits);
1337 1338 }
1338 1339
1339 1340 return (DDI_SUCCESS);
1340 1341 }
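
amd_iommu_setup() links each unit it finds onto the state's list with a head pointer plus a tail pointer (aioms_iommu_start/aioms_iommu_end), making every append O(1). The same pattern with stand-in types:

#include <stddef.h>

struct unit {
	struct unit *next;
};

struct state {
	struct unit *start;
	struct unit *end;
	int nunits;
};

static void
append_unit(struct state *sp, struct unit *up)
{
	up->next = NULL;
	if (sp->start == NULL)
		sp->start = up;		/* first unit found */
	else
		sp->end->next = up;	/* link after the current tail */
	sp->end = up;
	sp->nunits++;
}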
1341 1342
1342 1343 int
1343 1344 amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
1344 1345 {
1345 1346 int instance = ddi_get_instance(dip);
1346 1347 const char *driver = ddi_driver_name(dip);
1347 1348 amd_iommu_t *iommu, *next_iommu;
1348 1349 int teardown;
1349 1350 int error = DDI_SUCCESS;
1350 1351 const char *f = "amd_iommu_teardown";
1351 1352
1352 1353 teardown = 0;
1353 1354 for (iommu = statep->aioms_iommu_start; iommu;
1354 1355 iommu = next_iommu) {
1355 1356 ASSERT(statep->aioms_nunits > 0);
1356 1357 next_iommu = iommu->aiomt_next;
1357 1358 if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
1358 1359 error = DDI_FAILURE;
1359 1360 continue;
1360 1361 }
1361 1362 statep->aioms_nunits--;
1362 1363 teardown++;
1363 1364 }
1364 1365
1365 1366  	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
1366 1367 "%d units left", f, driver, instance, (void *)statep,
1367 1368 teardown, statep->aioms_nunits);
1368 1369
1369 1370 return (error);
1370 1371 }
1371 1372
1372 1373 dev_info_t *
1373 1374 amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
1374 1375 {
1375 1376 dev_info_t *pdip;
1376 1377 const char *driver = ddi_driver_name(rdip);
1377 1378 int instance = ddi_get_instance(rdip);
1378 1379 const char *f = "amd_iommu_pci_dip";
1379 1380
1380 1381 /* Hold rdip so it and its parents don't go away */
1381 1382 ndi_hold_devi(rdip);
1382 1383
1383 1384 if (ddi_is_pci_dip(rdip))
1384 1385 return (rdip);
1385 1386
1386 1387 pdip = rdip;
1387 1388 while (pdip = ddi_get_parent(pdip)) {
1388 1389 if (ddi_is_pci_dip(pdip)) {
1389 1390 ndi_hold_devi(pdip);
1390 1391 ndi_rele_devi(rdip);
1391 1392 return (pdip);
1392 1393 }
1393 1394 }
1394 1395
1395 - cmn_err(
1396 1396 #ifdef DEBUG
1397 - CE_PANIC,
1397 + cmn_err(CE_PANIC, "%s: %s%d dip = %p has no PCI parent, path = %s",
1398 + f, driver, instance, (void *)rdip, path);
1398 1399 #else
1399 - CE_WARN,
1400 -#endif /* DEBUG */
1401 - "%s: %s%d dip = %p has no PCI parent, path = %s",
1400 + cmn_err(CE_WARN, "%s: %s%d dip = %p has no PCI parent, path = %s",
1402 1401 f, driver, instance, (void *)rdip, path);
1403 -
1404 1402 ndi_rele_devi(rdip);
1403 +#endif /* DEBUG */
1405 1404
1406 1405 return (NULL);
1407 1406 }
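
amd_iommu_pci_dip() holds rdip before walking up the devinfo parents, then takes a hold on the matching PCI ancestor before releasing the original, so nothing on the path can go away mid-walk. A sketch of that discipline with a hypothetical refcounted node in place of dev_info_t:

#include <stddef.h>

struct node {
	struct node *parent;
	int is_pci;
	int holds;	/* stand-in for ndi_hold_devi()/ndi_rele_devi() */
};

static struct node *
find_pci_ancestor(struct node *np)
{
	struct node *p;

	np->holds++;			/* hold the start node */
	if (np->is_pci)
		return (np);		/* caller inherits the hold */

	for (p = np->parent; p != NULL; p = p->parent) {
		if (p->is_pci) {
			p->holds++;	/* hold the ancestor first ... */
			np->holds--;	/* ... then drop the start node */
			return (p);
		}
	}
	np->holds--;			/* no PCI parent: drop and fail */
	return (NULL);
}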
1408 1407
1409 1408 /* Interface with IOMMULIB */
1410 1409 /*ARGSUSED*/
1411 1410 static int
1412 1411 amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
1413 1412 {
1414 1413 const char *driver = ddi_driver_name(rdip);
1415 1414 char *s;
1416 1415 int bus, device, func, bdf;
1417 1416 amd_iommu_acpi_ivhd_t *hinfop;
1418 1417 dev_info_t *pci_dip;
1419 1418 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1420 1419 const char *f = "amd_iommu_probe";
1421 1420 int instance = ddi_get_instance(iommu->aiomt_dip);
1422 1421 const char *idriver = ddi_driver_name(iommu->aiomt_dip);
1423 1422 char *path, *pathp;
1424 1423
1425 1424 if (amd_iommu_disable_list) {
1426 1425 s = strstr(amd_iommu_disable_list, driver);
1427 1426 if (s == NULL)
1428 1427 return (DDI_SUCCESS);
1429 1428 if (s == amd_iommu_disable_list || *(s - 1) == ':') {
1430 1429 s += strlen(driver);
1431 1430 if (*s == '\0' || *s == ':') {
1432 1431 amd_iommu_set_passthru(iommu, rdip);
1433 1432 return (DDI_FAILURE);
1434 1433 }
1435 1434 }
1436 1435 }
1437 1436
1438 1437 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1439 1438 if ((pathp = ddi_pathname(rdip, path)) == NULL)
1440 1439 pathp = "<unknown>";
1441 1440
1442 1441 pci_dip = amd_iommu_pci_dip(rdip, path);
1443 1442 if (pci_dip == NULL) {
1444 1443 cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
1445 1444 "for rdip=%p, path = %s",
1446 1445 f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1447 1446 pathp);
1448 1447 kmem_free(path, MAXPATHLEN);
1449 1448 return (DDI_FAILURE);
1450 1449 }
1451 1450
1452 1451 if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
1453 1452 cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get BDF "
1454 1453 "for rdip=%p, path = %s",
1455 1454 f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1456 1455 pathp);
1457 1456 kmem_free(path, MAXPATHLEN);
1458 1457 return (DDI_FAILURE);
1459 1458 }
1460 1459 kmem_free(path, MAXPATHLEN);
1461 1460
1462 1461 /*
1463 1462 * See whether device is described by IVRS as being managed
1464 1463 * by this IOMMU
1465 1464 */
1466 1465 bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
1467 1466 hinfop = amd_iommu_lookup_ivhd(bdf);
1468 1467 if (hinfop && hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
1469 1468 return (DDI_SUCCESS);
1470 1469
1471 1470 return (DDI_FAILURE);
1472 1471 }
1473 1472
1474 1473 /*ARGSUSED*/
1475 1474 static int
1476 1475 amd_iommu_allochdl(iommulib_handle_t handle,
1477 1476 dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1478 1477 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
1479 1478 {
1480 1479 return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
1481 1480 arg, dma_handlep));
1482 1481 }
1483 1482
1484 1483 /*ARGSUSED*/
1485 1484 static int
1486 1485 amd_iommu_freehdl(iommulib_handle_t handle,
1487 1486 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1488 1487 {
1489 1488 return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
1490 1489 }
1491 1490
1492 1491 /*ARGSUSED*/
1493 1492 static int
1494 1493 map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
1495 1494 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
1496 1495 int km_flags)
1497 1496 {
1498 1497 const char *driver = ddi_driver_name(iommu->aiomt_dip);
1499 1498 int instance = ddi_get_instance(iommu->aiomt_dip);
1500 1499 int idx = iommu->aiomt_idx;
1501 1500 int i;
1502 1501 uint64_t start_va;
1503 1502 char *path;
1504 1503 int error = DDI_FAILURE;
1505 1504 const char *f = "map_current_window";
1506 1505
1507 1506 path = kmem_alloc(MAXPATHLEN, km_flags);
1508 1507 if (path == NULL) {
1509 1508 return (DDI_DMA_NORESOURCES);
1510 1509 }
1511 1510
1512 1511 (void) ddi_pathname(rdip, path);
1513 1512 mutex_enter(&amd_iommu_pgtable_lock);
1514 1513
1515 1514 if (amd_iommu_debug == AMD_IOMMU_DEBUG_PAGE_TABLES) {
1516 1515 cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
1517 1516 "from handle for device %s",
1518 1517 f, driver, instance, idx, path);
1519 1518 }
1520 1519
1521 1520 start_va = 0;
1522 1521 for (i = 0; i < ccount; i++) {
1523 1522 if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
1524 1523 cookie_array[i].dmac_cookie_addr,
1525 1524 cookie_array[i].dmac_size,
1526 1525 AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
1527 1526 break;
1528 1527 }
1529 1528 cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
1530 1529 cookie_array[i].dmac_type = 0;
1531 1530 }
1532 1531
1533 1532 if (i != ccount) {
1534 1533 cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
1535 1534 "for device %s", f, driver, instance, idx, i, path);
1536 1535 (void) unmap_current_window(iommu, rdip, cookie_array,
1537 1536 ccount, i, 1);
1538 1537 goto out;
1539 1538 }
1540 1539
1541 1540 if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1542 1541 cmn_err(CE_NOTE, "%s: return SUCCESS", f);
1543 1542 }
1544 1543
1545 1544 error = DDI_DMA_MAPPED;
1546 1545 out:
1547 1546 mutex_exit(&amd_iommu_pgtable_lock);
1548 1547 kmem_free(path, MAXPATHLEN);
1549 1548 return (error);
1550 1549 }
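
One detail worth calling out: start_va is an in/out parameter of amd_iommu_map_pa2va(). It starts at 0, letting the first call choose a virtual base; each successful call returns the VA it assigned, and the loop immediately overwrites the cookie's physical address with it, so after a full pass the cookie array describes IOMMU virtual addresses only. In shape (a condensed restatement of the loop above, not new logic):

    start_va = 0;
    for (i = 0; i < ccount; i++) {
            /* On success start_va holds the VA assigned to cookie i. */
            if (amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
                cookie_array[i].dmac_cookie_addr,
                cookie_array[i].dmac_size,
                AMD_IOMMU_VMEM_MAP, &start_va, km_flags) != DDI_SUCCESS)
                    break;          /* leaves cookies [0, i) mapped */
            cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
            cookie_array[i].dmac_type = 0;
    }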
1551 1550
1552 1551 /*ARGSUSED*/
1553 1552 static int
1554 1553 unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
1555 1554 ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
1556 1555 {
1557 1556 const char *driver = ddi_driver_name(iommu->aiomt_dip);
1558 1557 int instance = ddi_get_instance(iommu->aiomt_dip);
1559 1558 int idx = iommu->aiomt_idx;
1560 1559 int i;
1561 1560 int error = DDI_FAILURE;
1562 1561 char *path;
1563 1562 int pathfree;
1564 1563 const char *f = "unmap_current_window";
1565 1564
1566 1565 if (!locked)
1567 1566 mutex_enter(&amd_iommu_pgtable_lock);
1568 1567
1569 1568 path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
1570 1569 if (path) {
1571 1570 (void) ddi_pathname(rdip, path);
1572 1571 pathfree = 1;
1573 1572 } else {
1574 1573 path = "<path-mem-alloc-failed>";
1575 1574 pathfree = 0;
1576 1575 }
1577 1576
1578 1577 if (ncookies == -1)
1579 1578 ncookies = ccount;
1580 1579
1581 1580 for (i = 0; i < ncookies; i++) {
1582 1581 if (amd_iommu_unmap_va(iommu, rdip,
1583 1582 cookie_array[i].dmac_cookie_addr,
1584 1583 cookie_array[i].dmac_size,
1585 1584 AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
1586 1585 break;
1587 1586 }
1588 1587 }
1589 1588
1590 1589 if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
1591 1590 != DDI_SUCCESS) {
1592 1591 cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
1593 1592 f, path);
1594 1593 }
1595 1594
1596 1595 if (i != ncookies) {
1597 1596 cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
1598 1597 "for device %s", f, driver, instance, idx, i, path);
1599 1598 error = DDI_FAILURE;
1600 1599 goto out;
1601 1600 }
1602 1601
1603 1602 error = DDI_SUCCESS;
1604 1603
1605 1604 out:
1606 1605 if (pathfree)
1607 1606 kmem_free(path, MAXPATHLEN);
1608 1607 if (!locked)
1609 1608 mutex_exit(&amd_iommu_pgtable_lock);
1610 1609 return (error);
1611 1610 }
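
unmap_current_window() has two calling conventions: ncookies == -1 means "undo all ccount cookies" (the unbind and window-switch paths), while a non-negative ncookies undoes only the first ncookies entries after a partial map_current_window() failure; that failure path also passes locked == 1 because it already holds amd_iommu_pgtable_lock. Both shapes, as they appear elsewhere in this file:

    /*
     * Partial-failure cleanup inside map_current_window(): cookie i
     * failed, so only cookies [0, i) are mapped; the page-table lock
     * is already held.
     */
    (void) unmap_current_window(iommu, rdip, cookie_array, ccount, i, 1);

    /*
     * Full teardown from the unbind path: -1 selects all ccount
     * cookies, and locked == 0 has the function take the lock itself.
     */
    (void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);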
1612 1611
1613 1612 /*ARGSUSED*/
1614 1613 static int
1615 1614 amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
1616 1615 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1617 1616 struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
1618 1617 uint_t *ccountp)
1619 1618 {
1620 1619 int dma_error = DDI_DMA_NOMAPPING;
1621 1620 int error;
1622 1621 char *path;
1623 1622 ddi_dma_cookie_t *cookie_array = NULL;
1624 1623 uint_t ccount = 0;
1625 1624 ddi_dma_impl_t *hp;
1626 1625 ddi_dma_attr_t *attrp;
1627 1626 int km_flags;
1628 1627 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1629 1628 int instance = ddi_get_instance(rdip);
1630 1629 const char *driver = ddi_driver_name(rdip);
1631 1630 const char *f = "amd_iommu_bindhdl";
1632 1631
1633 1632 dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
1634 1633 dmareq, cookiep, ccountp);
1635 1634
1636 1635 if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
1637 1636 return (dma_error);
1638 1637
1639 1638 km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1640 1639
1641 1640 path = kmem_alloc(MAXPATHLEN, km_flags);
1642 1641 if (path) {
1643 1642 (void) ddi_pathname(rdip, path);
1644 1643 } else {
1645 1644 dma_error = DDI_DMA_NORESOURCES;
1646 1645 goto unbind;
1647 1646 }
1648 1647
1649 1648 if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1650 1649 cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
1651 1650 f, path,
1652 1651 (void *)cookiep->dmac_cookie_addr,
1653 1652 *ccountp);
1654 1653 }
1655 1654
1656 1655 cookie_array = NULL;
1657 1656 ccount = 0;
1658 1657 if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
1659 1658 &cookie_array, &ccount)) != DDI_SUCCESS) {
1660 1659 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1661 1660 "for device %s", f, driver, instance, path);
1662 1661 dma_error = error;
1663 1662 goto unbind;
1664 1663 }
1665 1664
1666 1665 hp = (ddi_dma_impl_t *)dma_handle;
1667 1666 attrp = &hp->dmai_attr;
1668 1667
1669 1668 error = map_current_window(iommu, rdip, attrp, dmareq,
1670 1669 cookie_array, ccount, km_flags);
1671 1670 if (error != DDI_SUCCESS) {
1672 1671 dma_error = error;
1673 1672 goto unbind;
1674 1673 }
1675 1674
1676 1675 if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
1677 1676 cookie_array, ccount)) != DDI_SUCCESS) {
1678 1677 cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1679 1678 "for device %s", f, driver, instance, path);
1680 1679 dma_error = error;
1681 1680 goto unbind;
1682 1681 }
1683 1682
1684 1683 *cookiep = cookie_array[0];
1685 1684
1686 1685 if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
1687 1686 cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
1688 1687 f, path,
1689 1688 (void *)(uintptr_t)cookiep->dmac_cookie_addr,
1690 1689 *ccountp);
1691 1690 }
1692 1691
1693 1692 kmem_free(path, MAXPATHLEN);
1694 1693 ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
1695 1694 return (dma_error);
1696 1695 unbind:
1697 1696 kmem_free(path, MAXPATHLEN);
1698 1697 (void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
1699 1698 return (dma_error);
1700 1699 }
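
The bind path is a wrapper pattern: let the system DMA implementation perform the real bind, borrow the physical-address cookies it produced, rewrite them in place to IOMMU virtual addresses, and hand them back so the caller only ever sees remapped cookies. Condensed to its skeleton (error handling and debug output elided; a restatement of the function above, not a replacement):

    /* 1. Ordinary bind through the system implementation. */
    dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
        dmareq, cookiep, ccountp);

    /* 2. Borrow the cookie array the bind produced. */
    (void) iommulib_iommu_dma_get_cookies(dip, dma_handle,
        &cookie_array, &ccount);

    /* 3. Rewrite each cookie from PA to IOMMU VA. */
    (void) map_current_window(iommu, rdip, &hp->dmai_attr, dmareq,
        cookie_array, ccount, km_flags);

    /*
     * 4. Reattach the rewritten cookies; the first one is also
     * copied out for the caller.
     */
    (void) iommulib_iommu_dma_set_cookies(dip, dma_handle,
        cookie_array, ccount);
    *cookiep = cookie_array[0];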
1701 1700
1702 1701 /*ARGSUSED*/
1703 1702 static int
1704 1703 amd_iommu_unbindhdl(iommulib_handle_t handle,
1705 1704 dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1706 1705 {
1707 1706 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1708 1707 ddi_dma_cookie_t *cookie_array = NULL;
1709 1708 uint_t ccount = 0;
1710 1709 int error = DDI_FAILURE;
1711 1710 int instance = ddi_get_instance(rdip);
1712 1711 const char *driver = ddi_driver_name(rdip);
1713 1712 const char *f = "amd_iommu_unbindhdl";
1714 1713
1715 1714 cookie_array = NULL;
1716 1715 ccount = 0;
1717 1716 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1718 1717 &ccount) != DDI_SUCCESS) {
1719 1718 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1720 1719 "for device %p", f, driver, instance, (void *)rdip);
1721 1720 error = DDI_FAILURE;
1722 1721 goto out;
1723 1722 }
1724 1723
1725 1724 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1726 1725 cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1727 1726 "for device %p", f, driver, instance, (void *)rdip);
1728 1727 error = DDI_FAILURE;
1729 1728 goto out;
1730 1729 }
1731 1730
1732 1731 if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
1733 1732 != DDI_SUCCESS) {
1734 1733 cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
1735 1734 f, driver, instance, (void *)rdip);
1736 1735 error = DDI_FAILURE;
1737 1736 goto out;
1738 1737 }
1739 1738
1740 1739 if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
1741 1740 != DDI_SUCCESS) {
1742 1741 cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
1743 1742 "for dip=%p", f, driver, instance, (void *)rdip);
1744 1743 error = DDI_FAILURE;
1745 1744 } else {
1746 1745 error = DDI_SUCCESS;
1747 1746 }
1748 1747 out:
1749 1748 if (cookie_array)
1750 1749 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1751 1750 return (error);
1752 1751 }
1753 1752
1754 1753 /*ARGSUSED*/
1755 1754 static int
1756 1755 amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
1757 1756 dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
1758 1757 size_t len, uint_t cache_flags)
1759 1758 {
1760 1759 ddi_dma_cookie_t *cookie_array = NULL;
1761 1760 uint_t ccount = 0;
1762 1761 int error;
1763 1762 const char *f = "amd_iommu_sync";
1764 1763
1765 1764 cookie_array = NULL;
1766 1765 ccount = 0;
1767 1766 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1768 1767 &ccount) != DDI_SUCCESS) {
1769 1768 ASSERT(cookie_array == NULL);
1770 1769 cmn_err(CE_WARN, "%s: Cannot get cookies "
1771 1770 "for device %p", f, (void *)rdip);
1772 1771 error = DDI_FAILURE;
1773 1772 goto out;
1774 1773 }
1775 1774
1776 1775 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1777 1776 cmn_err(CE_WARN, "%s: Cannot clear cookies "
1778 1777 "for device %p", f, (void *)rdip);
1779 1778 error = DDI_FAILURE;
1780 1779 goto out;
1781 1780 }
1782 1781
1783 1782 error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
1784 1783 len, cache_flags);
1785 1784
1786 1785 if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1787 1786 ccount) != DDI_SUCCESS) {
1788 1787 cmn_err(CE_WARN, "%s: Cannot set cookies "
1789 1788 "for device %p", f, (void *)rdip);
1790 1789 error = DDI_FAILURE;
1791 1790 } else {
1792 1791 cookie_array = NULL;
1793 1792 ccount = 0;
1794 1793 }
1795 1794
1796 1795 out:
1797 1796 if (cookie_array)
1798 1797 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1799 1798 return (error);
1800 1799 }
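
The sync path temporarily detaches the remapped cookie array, presumably so the system sync routine operates on the handle as the common DMA code left it, then reattaches the array afterwards. On the success path the local pointer is NULLed precisely so the cleanup at out: does not free an array the handle owns again. The dance, stripped of error handling:

    (void) iommulib_iommu_dma_get_cookies(dip, dma_handle,
        &cookie_array, &ccount);
    (void) iommulib_iommu_dma_clear_cookies(dip, dma_handle);

    error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
        len, cache_flags);

    /*
     * Reattach; on success the handle owns the array again, so drop
     * the local reference to keep out: from freeing it.
     */
    if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
        ccount) == DDI_SUCCESS) {
            cookie_array = NULL;
            ccount = 0;
    }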
1801 1800
1802 1801 /*ARGSUSED*/
1803 1802 static int
1804 1803 amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
1805 1804 dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
1806 1805 off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
1807 1806 uint_t *ccountp)
1808 1807 {
1809 1808 int error = DDI_FAILURE;
1810 1809 amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1811 1810 ddi_dma_cookie_t *cookie_array = NULL;
1812 1811 uint_t ccount = 0;
1813 1812 int km_flags;
1814 1813 ddi_dma_impl_t *hp;
1815 1814 ddi_dma_attr_t *attrp;
1816 1815 struct ddi_dma_req sdmareq = {0};
1817 1816 int instance = ddi_get_instance(rdip);
1818 1817 const char *driver = ddi_driver_name(rdip);
1819 1818 const char *f = "amd_iommu_win";
1820 1819
1821 1820 km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
1822 1821
1823 1822 cookie_array = NULL;
1824 1823 ccount = 0;
1825 1824 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1826 1825 &ccount) != DDI_SUCCESS) {
1827 1826 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1828 1827 "for device %p", f, driver, instance, (void *)rdip);
1829 1828 error = DDI_FAILURE;
1830 1829 goto out;
1831 1830 }
1832 1831
1833 1832 if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
1834 1833 cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
1835 1834 "for device %p", f, driver, instance, (void *)rdip);
1836 1835 error = DDI_FAILURE;
1837 1836 goto out;
1838 1837 }
1839 1838
1840 1839 if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
1841 1840 offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
1842 1841 cmn_err(CE_WARN, "%s: %s%d: failed to switch windows for dip=%p",
1843 1842 f, driver, instance, (void *)rdip);
1844 1843 error = DDI_FAILURE;
1845 1844 goto out;
1846 1845 }
1847 1846
1848 1847 (void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
1849 1848
1850 1849 if (cookie_array) {
1851 1850 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1852 1851 cookie_array = NULL;
1853 1852 ccount = 0;
1854 1853 }
1855 1854
1856 1855 cookie_array = NULL;
1857 1856 ccount = 0;
1858 1857 if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
1859 1858 &ccount) != DDI_SUCCESS) {
1860 1859 cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
1861 1860 "for device %p", f, driver, instance, (void *)rdip);
1862 1861 error = DDI_FAILURE;
1863 1862 goto out;
1864 1863 }
1865 1864
1866 1865 hp = (ddi_dma_impl_t *)dma_handle;
1867 1866 attrp = &hp->dmai_attr;
1868 1867
1869 1868 sdmareq.dmar_flags = DDI_DMA_RDWR;
1870 1869 error = map_current_window(iommu, rdip, attrp, &sdmareq,
1871 1870 cookie_array, ccount, km_flags);
1872 1871
1873 1872 if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
1874 1873 ccount) != DDI_SUCCESS) {
1875 1874 cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
1876 1875 "for device %p", f, driver, instance, (void *)rdip);
1877 1876 error = DDI_FAILURE;
1878 1877 goto out;
1879 1878 }
1880 1879
1881 1880 *cookiep = cookie_array[0];
1882 1881
1883 1882 return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1884 1883 out:
1885 1884 if (cookie_array)
1886 1885 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
1887 1886
1888 1887 return (error);
1889 1888 }
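
Switching DMA windows is a full remap cycle: detach and unmap the outgoing window's cookies, let the system implementation advance to the requested window, then fetch the new window's cookies and map them with a synthetic read/write request (only dmar_flags of sdmareq is meaningful here). In outline, with error handling and the kmem_free of the outgoing cookie array elided:

    /* Tear down the outgoing window (-1 == all ccount cookies). */
    (void) iommulib_iommu_dma_get_cookies(dip, dma_handle,
        &cookie_array, &ccount);
    (void) iommulib_iommu_dma_clear_cookies(dip, dma_handle);
    (void) iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
        offp, lenp, cookiep, ccountp);
    (void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);

    /* Map the incoming window and reattach its cookies. */
    (void) iommulib_iommu_dma_get_cookies(dip, dma_handle,
        &cookie_array, &ccount);
    sdmareq.dmar_flags = DDI_DMA_RDWR;
    (void) map_current_window(iommu, rdip, attrp, &sdmareq,
        cookie_array, ccount, km_flags);
    (void) iommulib_iommu_dma_set_cookies(dip, dma_handle,
        cookie_array, ccount);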
1890 1889
1891 1890 /*ARGSUSED*/
1892 1891 static int
1893 1892 amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
1894 1893 dev_info_t *rdip, ddi_dma_handle_t dma_handle,
1895 1894 struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao)
1896 1895 {
1897 1896 return (DDI_ENOTSUP);
1898 1897 }
1899 1898
1900 1899 /*ARGSUSED*/
1901 1900 static int
1902 1901 amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
1903 1902 dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
1904 1903 {
1905 1904 return (DDI_ENOTSUP);
1906 1905 }
1907 1906
1908 1907 uint64_t
1909 1908 amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
1910 1909 {
1911 1910 split_t s;
1912 1911 uint32_t *ptr32 = (uint32_t *)regp;
1913 1912 uint64_t *s64p = &(s.u64);
1914 1913
1915 1914 s.u32[0] = ptr32[0];
1916 1915 s.u32[1] = ptr32[1];
1917 1916
1918 1917 return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
1919 1918 }
1920 1919
1921 1920 uint64_t
1922 1921 amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
1923 1922 {
1924 1923 split_t s;
1925 1924 uint32_t *ptr32 = (uint32_t *)regp;
1926 1925 uint64_t *s64p = &(s.u64);
1927 1926
1928 1927 s.u32[0] = ptr32[0];
1929 1928 s.u32[1] = ptr32[1];
1930 1929
1931 1930 AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
1932 1931
1933 1932 *regp = s.u64;
1934 1933
1935 1934 return (s.u64);
1936 1935 }
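
Both workaround routines exist for hardware that mishandles a single 64-bit MMIO access: the register is moved as two 32-bit halves through a split_t, which (from its use here) is a union of one uint64_t and a two-element uint32_t array; the bit manipulation then happens on the stack copy, and only the set path stores the full 64 bits back. A stand-alone illustration of the read half (split_t redefined locally for the sketch; the real definition lives in amd_iommu_impl.h):

    #include <stdint.h>

    typedef union {
            uint64_t u64;
            uint32_t u32[2];
    } split_t;              /* mirrors the union this file relies on */

    /* Read a 64-bit register as two 32-bit halves. */
    static uint64_t
    split_read64(volatile uint64_t *regp)
    {
            volatile uint32_t *p32 = (volatile uint32_t *)regp;
            split_t s;

            s.u32[0] = p32[0];      /* low half first */
            s.u32[1] = p32[1];      /* then the high half */
            return (s.u64);
    }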
1937 1936
1938 1937 void
1939 1938 amd_iommu_read_boot_props(void)
1940 1939 {
1941 1940 char *propval;
1942 1941
1943 1942 /*
1944 1943 * if "amd-iommu = no/false" boot property is set,
1945 1944 * ignore AMD iommu
1946 1945 */
1947 1946 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1948 1947 DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
1949 1948 if (strcmp(propval, "no") == 0 ||
1950 1949 strcmp(propval, "false") == 0) {
1951 1950 amd_iommu_disable = 1;
1952 1951 }
1953 1952 ddi_prop_free(propval);
1954 1953 }
1955 1954
1956 1955 /*
1957 1956 * Copy the list of drivers for which IOMMU is disabled by user.
1958 1957 */
1959 1958 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
1960 1959 DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
1961 1960 == DDI_SUCCESS) {
1962 1961 amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
1963 1962 KM_SLEEP);
1964 1963 (void) strcpy(amd_iommu_disable_list, propval);
1965 1964 ddi_prop_free(propval);
1966 1965 }
1967 1966
1968 1967 }
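
This routine reads the root node's boot properties, so the knobs can be supplied at boot time. On illumos/x86 one way (an example of the mechanism, not the only one) is a setprop line in the boot environment, or the equivalent -B option on the kernel line, using the same colon-separated syntax the matcher earlier in this file expects; the driver names are illustrative:

    # /boot/solaris/bootenv.rc (or the loader's -B prop=value option)
    setprop amd-iommu no
    setprop amd-iommu-disable-list ahci:ehci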
1969 1968
1970 1969 void
1971 1970 amd_iommu_lookup_conf_props(dev_info_t *dip)
1972 1971 {
1973 1972 char *disable;
1974 1973
1975 1974 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1976 1975 DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
1977 1976 == DDI_PROP_SUCCESS) {
1978 1977 if (strcmp(disable, "no") == 0) {
1979 1978 amd_iommu_disable = 1;
1980 1979 }
1981 1980 ddi_prop_free(disable);
1982 1981 }
1983 1982
1984 1983 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1985 1984 DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
1986 1985 &disable) == DDI_PROP_SUCCESS) {
1987 1986 amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
1988 1987 KM_SLEEP);
1989 1988 (void) strcpy(amd_iommu_disable_list, disable);
1990 1989 ddi_prop_free(disable);
1991 1990 }
1992 1991 }
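
The same two properties are also honored from the driver's .conf file at attach time, with ordinary driver.conf string syntax; note that, unlike the boot-property path, this lookup accepts only "no", not "false". A sketch of the amd_iommu.conf lines it would parse (list entries are illustrative):

    amd-iommu="no";
    amd-iommu-disable-list="ahci:ehci";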
578 lines elided