7127 remove -Wno-missing-braces from Makefile.uts
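This webrev fixes the one construct in xsvc.c that trips gcc's -Wmissing-braces warning: the second member of struct modlinkage is an array of linkage pointers (ml_linkage[] in sys/modctl.h), so its initializers belong inside their own brace pair rather than being flattened into the outer initializer list. A minimal sketch of the before/after pattern, mirroring the hunk below:

    /* Before: array elements flattened into the outer braces.
     * Legal C, but gcc -Wmissing-braces warns about it. */
    static struct modlinkage xsvc_modlinkage = {
            MODREV_1,
            (void *) &xsvc_modldrv,
            NULL
    };

    /* After: the sub-aggregate gets its own braces; any remaining
     * ml_linkage[] slots are implicitly zeroed. */
    static struct modlinkage xsvc_modlinkage = {
            MODREV_1,
            { (void *) &xsvc_modldrv, NULL }
    };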
--- old/usr/src/uts/i86pc/io/xsvc/xsvc.c
+++ new/usr/src/uts/i86pc/io/xsvc/xsvc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/errno.h>
28 28 #include <sys/types.h>
29 29 #include <sys/conf.h>
30 30 #include <sys/kmem.h>
31 31 #include <sys/ddi.h>
32 32 #include <sys/stat.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/file.h>
35 35 #include <sys/open.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/ddi_impldefs.h>
38 38 #include <vm/seg_kmem.h>
39 39 #include <sys/vmsystm.h>
40 40 #include <sys/sysmacros.h>
41 41 #include <sys/ddidevmap.h>
42 42 #include <sys/avl.h>
43 43 #ifdef __xpv
44 44 #include <sys/hypervisor.h>
45 45 #endif
46 46
47 47 #include <sys/xsvc.h>
48 48
49 49 /* total max memory which can be alloced with the ioctl interface */
50 50 uint64_t xsvc_max_memory = 10 * 1024 * 1024;
51 51
52 52 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
53 53
54 54
55 55 static int xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred);
56 56 static int xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred);
57 57 static int xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
58 58 int *rval);
59 59 static int xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
60 60 size_t *maplen, uint_t model);
61 61 static int xsvc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
62 62 static int xsvc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
63 63 static int xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
64 64 void **result);
65 65
66 66 static struct cb_ops xsvc_cb_ops = {
67 67 xsvc_open, /* cb_open */
68 68 xsvc_close, /* cb_close */
69 69 nodev, /* cb_strategy */
70 70 nodev, /* cb_print */
71 71 nodev, /* cb_dump */
72 72 nodev, /* cb_read */
73 73 nodev, /* cb_write */
74 74 xsvc_ioctl, /* cb_ioctl */
75 75 xsvc_devmap, /* cb_devmap */
76 76 NULL, /* cb_mmap */
77 77 NULL, /* cb_segmap */
78 78 nochpoll, /* cb_chpoll */
79 79 ddi_prop_op, /* cb_prop_op */
80 80 NULL, /* cb_stream */
81 81 D_NEW | D_MP | D_64BIT | D_DEVMAP, /* cb_flag */
82 82 CB_REV
83 83 };
84 84
85 85 static struct dev_ops xsvc_dev_ops = {
86 86 DEVO_REV, /* devo_rev */
87 87 0, /* devo_refcnt */
88 88 xsvc_getinfo, /* devo_getinfo */
89 89 nulldev, /* devo_identify */
90 90 nulldev, /* devo_probe */
91 91 xsvc_attach, /* devo_attach */
92 92 xsvc_detach, /* devo_detach */
93 93 nodev, /* devo_reset */
94 94 &xsvc_cb_ops, /* devo_cb_ops */
95 95 NULL, /* devo_bus_ops */
96 96 NULL, /* power */
97 97 ddi_quiesce_not_needed, /* quiesce */
98 98 };
99 99
100 100 static struct modldrv xsvc_modldrv = {
101 101 &mod_driverops, /* Type of module. This one is a driver */
102 102 "xsvc driver", /* Name of the module. */
103 103 &xsvc_dev_ops, /* driver ops */
104 104 };
105 105
106 106 static struct modlinkage xsvc_modlinkage = {
107 107 MODREV_1,
108 - (void *) &xsvc_modldrv,
109 - NULL
108 + { (void *) &xsvc_modldrv, NULL }
110 109 };
111 110
112 111
113 112 static int xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode);
114 113 static int xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode);
115 114 static int xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode);
116 115 static int xsvc_mem_alloc(xsvc_state_t *state, uint64_t key,
117 116 xsvc_mem_t **mp);
118 117 static void xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp);
119 118 static xsvc_mem_t *xsvc_mem_lookup(xsvc_state_t *state,
120 119 uint64_t key);
121 120 static int xsvc_mnode_key_compare(const void *q, const void *e);
122 121 static int xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
123 122 ddi_umem_cookie_t *cookiep);
124 123 static void xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep);
125 124
126 125
127 126 void *xsvc_statep;
128 127
129 128 static ddi_device_acc_attr_t xsvc_device_attr = {
130 129 DDI_DEVICE_ATTR_V0,
131 130 DDI_NEVERSWAP_ACC,
132 131 DDI_STRICTORDER_ACC
133 132 };
134 133
135 134 static int xsvc_devmap_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
136 135 offset_t off, size_t len, void **pvtp);
137 136 static int xsvc_devmap_dup(devmap_cookie_t dhp, void *pvtp,
138 137 devmap_cookie_t new_dhp, void **new_pvtp);
139 138 static void xsvc_devmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
140 139 size_t len, devmap_cookie_t new_dhp1, void **new_pvtp1,
141 140 devmap_cookie_t new_dhp2, void **new_pvtp2);
142 141
143 142
144 143 static struct devmap_callback_ctl xsvc_callbk = {
145 144 DEVMAP_OPS_REV,
146 145 xsvc_devmap_map,
147 146 NULL,
148 147 xsvc_devmap_dup,
149 148 xsvc_devmap_unmap
150 149 };
151 150
152 151
153 152 /*
154 153 * _init()
155 154 *
156 155 */
157 156 int
158 157 _init(void)
159 158 {
160 159 int err;
161 160
162 161 err = ddi_soft_state_init(&xsvc_statep, sizeof (xsvc_state_t), 1);
163 162 if (err != 0) {
164 163 return (err);
165 164 }
166 165
167 166 err = mod_install(&xsvc_modlinkage);
168 167 if (err != 0) {
169 168 ddi_soft_state_fini(&xsvc_statep);
170 169 return (err);
171 170 }
172 171
173 172 return (0);
174 173 }
175 174
176 175 /*
177 176 * _info()
178 177 *
179 178 */
180 179 int
181 180 _info(struct modinfo *modinfop)
182 181 {
183 182 return (mod_info(&xsvc_modlinkage, modinfop));
184 183 }
185 184
186 185 /*
187 186 * _fini()
188 187 *
189 188 */
190 189 int
191 190 _fini(void)
192 191 {
193 192 int err;
194 193
195 194 err = mod_remove(&xsvc_modlinkage);
196 195 if (err != 0) {
197 196 return (err);
198 197 }
199 198
200 199 ddi_soft_state_fini(&xsvc_statep);
201 200
202 201 return (0);
203 202 }
204 203
205 204 /*
206 205 * xsvc_attach()
207 206 *
208 207 */
209 208 static int
210 209 xsvc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
211 210 {
212 211 xsvc_state_t *state;
213 212 int maxallocmem;
214 213 int instance;
215 214 int err;
216 215
217 216
218 217 switch (cmd) {
219 218 case DDI_ATTACH:
220 219 break;
221 220
222 221 case DDI_RESUME:
223 222 return (DDI_SUCCESS);
224 223
225 224 default:
226 225 return (DDI_FAILURE);
227 226 }
228 227
229 228 instance = ddi_get_instance(dip);
230 229 err = ddi_soft_state_zalloc(xsvc_statep, instance);
231 230 if (err != DDI_SUCCESS) {
232 231 return (DDI_FAILURE);
233 232 }
234 233 state = ddi_get_soft_state(xsvc_statep, instance);
235 234 if (state == NULL) {
236 235 goto attachfail_get_soft_state;
237 236 }
238 237
239 238 state->xs_dip = dip;
240 239 state->xs_instance = instance;
241 240
242 241 /* Initialize allocation count */
243 242 mutex_init(&state->xs_mutex, NULL, MUTEX_DRIVER, NULL);
244 243 state->xs_currently_alloced = 0;
245 244
246 245 mutex_init(&state->xs_cookie_mutex, NULL, MUTEX_DRIVER, NULL);
247 246
248 247 /* create the minor node (for the ioctl) */
249 248 err = ddi_create_minor_node(dip, "xsvc", S_IFCHR, instance, DDI_PSEUDO,
250 249 0);
251 250 if (err != DDI_SUCCESS) {
252 251 goto attachfail_minor_node;
253 252 }
254 253
255 254 /*
256 255 * the maxallocmem property will override the default (xsvc_max_memory).
257 256 * This is the maximum total memory the ioctl will allow to be alloced.
258 257 */
259 258 maxallocmem = ddi_prop_get_int(DDI_DEV_T_ANY, state->xs_dip,
260 259 DDI_PROP_DONTPASS, "maxallocmem", -1);
261 260 if (maxallocmem >= 0) {
262 261 xsvc_max_memory = maxallocmem * 1024;
263 262 }
264 263
265 264 /* Initialize list of memory allocs */
266 265 mutex_init(&state->xs_mlist.ml_mutex, NULL, MUTEX_DRIVER, NULL);
267 266 avl_create(&state->xs_mlist.ml_avl, xsvc_mnode_key_compare,
268 267 sizeof (xsvc_mnode_t), offsetof(xsvc_mnode_t, mn_link));
269 268
270 269 /* Report that driver was loaded */
271 270 ddi_report_dev(dip);
272 271
273 272 return (DDI_SUCCESS);
274 273
275 274 attachfail_minor_node:
276 275 mutex_destroy(&state->xs_cookie_mutex);
277 276 mutex_destroy(&state->xs_mutex);
278 277 attachfail_get_soft_state:
279 278 (void) ddi_soft_state_free(xsvc_statep, instance);
280 279
281 280 return (err);
282 281 }
283 282
284 283 /*
285 284 * xsvc_detach()
286 285 *
287 286 */
288 287 static int
289 288 xsvc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
290 289 {
291 290 xsvc_state_t *state;
292 291 xsvc_mnode_t *mnode;
293 292 xsvc_mem_t *mp;
294 293 int instance;
295 294
296 295
297 296 instance = ddi_get_instance(dip);
298 297 state = ddi_get_soft_state(xsvc_statep, instance);
299 298 if (state == NULL) {
300 299 return (DDI_FAILURE);
301 300 }
302 301
303 302 switch (cmd) {
304 303 case DDI_DETACH:
305 304 break;
306 305
307 306 case DDI_SUSPEND:
308 307 return (DDI_SUCCESS);
309 308
310 309 default:
311 310 return (DDI_FAILURE);
312 311 }
313 312
314 313 ddi_remove_minor_node(dip, NULL);
315 314
316 315 /* Free any memory on list */
317 316 while ((mnode = avl_first(&state->xs_mlist.ml_avl)) != NULL) {
318 317 mp = mnode->mn_home;
319 318 xsvc_mem_free(state, mp);
320 319 }
321 320
322 321 /* remove list */
323 322 avl_destroy(&state->xs_mlist.ml_avl);
324 323 mutex_destroy(&state->xs_mlist.ml_mutex);
325 324
326 325 mutex_destroy(&state->xs_cookie_mutex);
327 326 mutex_destroy(&state->xs_mutex);
328 327 (void) ddi_soft_state_free(xsvc_statep, state->xs_instance);
329 328 return (DDI_SUCCESS);
330 329 }
331 330
332 331 /*
333 332 * xsvc_getinfo()
334 333 *
335 334 */
336 335 /*ARGSUSED*/
337 336 static int
338 337 xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
339 338 {
340 339 xsvc_state_t *state;
341 340 int instance;
342 341 dev_t dev;
343 342 int err;
344 343
345 344
346 345 dev = (dev_t)arg;
347 346 instance = getminor(dev);
348 347
349 348 switch (cmd) {
350 349 case DDI_INFO_DEVT2DEVINFO:
351 350 state = ddi_get_soft_state(xsvc_statep, instance);
352 351 if (state == NULL) {
353 352 return (DDI_FAILURE);
354 353 }
355 354 *result = (void *)state->xs_dip;
356 355 err = DDI_SUCCESS;
357 356 break;
358 357
359 358 case DDI_INFO_DEVT2INSTANCE:
360 359 *result = (void *)(uintptr_t)instance;
361 360 err = DDI_SUCCESS;
362 361 break;
363 362
364 363 default:
365 364 err = DDI_FAILURE;
366 365 break;
367 366 }
368 367
369 368 return (err);
370 369 }
371 370
372 371
373 372 /*
374 373 * xsvc_open()
375 374 *
376 375 */
377 376 /*ARGSUSED*/
378 377 static int
379 378 xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred)
380 379 {
381 380 xsvc_state_t *state;
382 381 int instance;
383 382
384 383 instance = getminor(*devp);
385 384 state = ddi_get_soft_state(xsvc_statep, instance);
386 385 if (state == NULL) {
387 386 return (ENXIO);
388 387 }
389 388
390 389 return (0);
391 390 }
392 391
393 392 /*
394 393 * xsvc_close()
395 394 *
396 395 */
397 396 /*ARGSUSED*/
398 397 static int
399 398 xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred)
400 399 {
401 400 return (0);
402 401 }
403 402
404 403 /*
405 404 * xsvc_ioctl()
406 405 *
407 406 */
408 407 /*ARGSUSED*/
409 408 static int
410 409 xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred, int *rval)
411 410 {
412 411 xsvc_state_t *state;
413 412 int instance;
414 413 int err;
415 414
416 415
417 416 err = drv_priv(cred);
418 417 if (err != 0) {
419 418 return (EPERM);
420 419 }
421 420 instance = getminor(dev);
422 421 if (instance == -1) {
423 422 return (EBADF);
424 423 }
425 424 state = ddi_get_soft_state(xsvc_statep, instance);
426 425 if (state == NULL) {
427 426 return (EBADF);
428 427 }
429 428
430 429 switch (cmd) {
431 430 case XSVC_ALLOC_MEM:
432 431 err = xsvc_ioctl_alloc_memory(state, (void *)arg, mode);
433 432 break;
434 433
435 434 case XSVC_FREE_MEM:
436 435 err = xsvc_ioctl_free_memory(state, (void *)arg, mode);
437 436 break;
438 437
439 438 case XSVC_FLUSH_MEM:
440 439 err = xsvc_ioctl_flush_memory(state, (void *)arg, mode);
441 440 break;
442 441
443 442 default:
444 443 err = ENXIO;
445 444 }
446 445
447 446 return (err);
448 447 }
449 448
450 449 /*
451 450 * xsvc_ioctl_alloc_memory()
452 451 *
453 452 */
454 453 static int
455 454 xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode)
456 455 {
457 456 xsvc_mem_req_32 params32;
458 457 xsvc_mloc_32 *usgl32;
459 458 xsvc_mem_req params;
460 459 xsvc_mloc_32 sgl32;
461 460 xsvc_mloc *usgl;
462 461 xsvc_mem_t *mp;
463 462 xsvc_mloc sgl;
464 463 uint64_t key;
465 464 size_t size;
466 465 int err;
467 466 int i;
468 467
469 468
470 469 /* Copy in the params, then get the size and key */
471 470 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
472 471 err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
473 472 mode);
474 473 if (err != 0) {
475 474 return (EFAULT);
476 475 }
477 476
478 477 key = (uint64_t)params32.xsvc_mem_reqid;
479 478 size = P2ROUNDUP((size_t)params32.xsvc_mem_size, PAGESIZE);
480 479 } else {
481 480 err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
482 481 if (err != 0) {
483 482 return (EFAULT);
484 483 }
485 484 key = (uint64_t)params.xsvc_mem_reqid;
486 485 size = P2ROUNDUP(params.xsvc_mem_size, PAGESIZE);
487 486 }
488 487
489 488 /*
490 489 * make sure this doesn't put us over the maximum allowed to be
491 490 * allocated
492 491 */
493 492 mutex_enter(&state->xs_mutex);
494 493 if ((state->xs_currently_alloced + size) > xsvc_max_memory) {
495 494 mutex_exit(&state->xs_mutex);
496 495 return (EAGAIN);
497 496 }
498 497 state->xs_currently_alloced += size;
499 498 mutex_exit(&state->xs_mutex);
500 499
501 500 /* get state to track this memory */
502 501 err = xsvc_mem_alloc(state, key, &mp);
503 502 if (err != 0) {
504 503 return (err);
505 504 }
506 505 mp->xm_size = size;
507 506
508 507 /* allocate and bind the memory */
509 508 mp->xm_dma_attr.dma_attr_version = DMA_ATTR_V0;
510 509 mp->xm_dma_attr.dma_attr_count_max = (uint64_t)0xFFFFFFFF;
511 510 mp->xm_dma_attr.dma_attr_burstsizes = 1;
512 511 mp->xm_dma_attr.dma_attr_minxfer = 1;
513 512 mp->xm_dma_attr.dma_attr_maxxfer = (uint64_t)0xFFFFFFFF;
514 513 mp->xm_dma_attr.dma_attr_seg = (uint64_t)0xFFFFFFFF;
515 514 mp->xm_dma_attr.dma_attr_granular = 1;
516 515 mp->xm_dma_attr.dma_attr_flags = 0;
517 516
518 517 /* Finish converting params */
519 518 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
520 519 mp->xm_dma_attr.dma_attr_addr_lo = params32.xsvc_mem_addr_lo;
521 520 mp->xm_dma_attr.dma_attr_addr_hi = params32.xsvc_mem_addr_hi;
522 521 mp->xm_dma_attr.dma_attr_sgllen = params32.xsvc_mem_sgllen;
523 522 usgl32 = (xsvc_mloc_32 *)(uintptr_t)params32.xsvc_sg_list;
524 523 mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
525 524 params32.xsvc_mem_align, PAGESIZE);
526 525 } else {
527 526 mp->xm_dma_attr.dma_attr_addr_lo = params.xsvc_mem_addr_lo;
528 527 mp->xm_dma_attr.dma_attr_addr_hi = params.xsvc_mem_addr_hi;
529 528 mp->xm_dma_attr.dma_attr_sgllen = params.xsvc_mem_sgllen;
530 529 usgl = (xsvc_mloc *)(uintptr_t)params.xsvc_sg_list;
531 530 mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
532 531 params.xsvc_mem_align, PAGESIZE);
533 532 }
534 533
535 534 mp->xm_device_attr = xsvc_device_attr;
536 535
537 536 err = ddi_dma_alloc_handle(state->xs_dip, &mp->xm_dma_attr,
538 537 DDI_DMA_SLEEP, NULL, &mp->xm_dma_handle);
539 538 if (err != DDI_SUCCESS) {
540 539 err = EINVAL;
541 540 goto allocfail_alloc_handle;
542 541 }
543 542
544 543 /* don't sleep here so we don't get stuck in contig alloc */
545 544 err = ddi_dma_mem_alloc(mp->xm_dma_handle, mp->xm_size,
546 545 &mp->xm_device_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
547 546 &mp->xm_addr, &mp->xm_real_length, &mp->xm_mem_handle);
548 547 if (err != DDI_SUCCESS) {
549 548 err = EINVAL;
550 549 goto allocfail_alloc_mem;
551 550 }
552 551
553 552 err = ddi_dma_addr_bind_handle(mp->xm_dma_handle, NULL, mp->xm_addr,
554 553 mp->xm_size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
555 554 NULL, &mp->xm_cookie, &mp->xm_cookie_count);
556 555 if (err != DDI_DMA_MAPPED) {
557 556 err = EFAULT;
558 557 goto allocfail_bind;
559 558 }
560 559
561 560 /* return sgl */
562 561 for (i = 0; i < mp->xm_cookie_count; i++) {
563 562 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
564 563 sgl32.mloc_addr = mp->xm_cookie.dmac_laddress;
565 564 sgl32.mloc_size = mp->xm_cookie.dmac_size;
566 565 err = ddi_copyout(&sgl32, &usgl32[i],
567 566 sizeof (xsvc_mloc_32), mode);
568 567 if (err != 0) {
569 568 err = EFAULT;
570 569 goto allocfail_copyout;
571 570 }
572 571 } else {
573 572 sgl.mloc_addr = mp->xm_cookie.dmac_laddress;
574 573 sgl.mloc_size = mp->xm_cookie.dmac_size;
575 574 err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc),
576 575 mode);
577 576 if (err != 0) {
578 577 err = EFAULT;
579 578 goto allocfail_copyout;
580 579 }
581 580 }
582 581 ddi_dma_nextcookie(mp->xm_dma_handle, &mp->xm_cookie);
583 582 }
584 583
585 584 /* set the last sgl entry to 0 to indicate cookie count */
586 585 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
587 586 sgl32.mloc_addr = 0;
588 587 sgl32.mloc_size = 0;
589 588 err = ddi_copyout(&sgl32, &usgl32[i], sizeof (xsvc_mloc_32),
590 589 mode);
591 590 if (err != 0) {
592 591 err = EFAULT;
593 592 goto allocfail_copyout;
594 593 }
595 594 } else {
596 595 sgl.mloc_addr = 0;
597 596 sgl.mloc_size = 0;
598 597 err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc), mode);
599 598 if (err != 0) {
600 599 err = EFAULT;
601 600 goto allocfail_copyout;
602 601 }
603 602 }
604 603
605 604 return (0);
606 605
607 606 allocfail_copyout:
608 607 (void) ddi_dma_unbind_handle(mp->xm_dma_handle);
609 608 allocfail_bind:
610 609 ddi_dma_mem_free(&mp->xm_mem_handle);
611 610 allocfail_alloc_mem:
612 611 ddi_dma_free_handle(&mp->xm_dma_handle);
613 612 allocfail_alloc_handle:
614 613 mp->xm_dma_handle = NULL;
615 614 xsvc_mem_free(state, mp);
616 615
617 616 mutex_enter(&state->xs_mutex);
618 617 state->xs_currently_alloced = state->xs_currently_alloced - size;
619 618 mutex_exit(&state->xs_mutex);
620 619
621 620 return (err);
622 621 }
623 622
624 623 /*
625 624 * xsvc_ioctl_flush_memory()
626 625 *
627 626 */
628 627 static int
629 628 xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode)
630 629 {
631 630 xsvc_mem_req_32 params32;
632 631 xsvc_mem_req params;
633 632 xsvc_mem_t *mp;
634 633 uint64_t key;
635 634 int err;
636 635
637 636
638 637 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
639 638 err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
640 639 mode);
641 640 if (err != 0) {
642 641 return (EFAULT);
643 642 }
644 643 key = (uint64_t)params32.xsvc_mem_reqid;
645 644 } else {
646 645 err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
647 646 if (err != 0) {
648 647 return (EFAULT);
649 648 }
650 649 key = (uint64_t)params.xsvc_mem_reqid;
651 650 }
652 651
653 652 /* find the memory */
654 653 mp = xsvc_mem_lookup(state, key);
655 654 if (mp == NULL) {
656 655 return (EINVAL);
657 656 }
658 657
659 658 (void) ddi_dma_sync(mp->xm_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
660 659
661 660 return (0);
662 661 }
663 662
664 663
665 664 /*
666 665 * xsvc_ioctl_free_memory()
667 666 *
668 667 */
669 668 static int
670 669 xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode)
671 670 {
672 671 xsvc_mem_req_32 params32;
673 672 xsvc_mem_req params;
674 673 xsvc_mem_t *mp;
675 674 uint64_t key;
676 675 int err;
677 676
678 677
679 678 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
680 679 err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
681 680 mode);
682 681 if (err != 0) {
683 682 return (EFAULT);
684 683 }
685 684 key = (uint64_t)params32.xsvc_mem_reqid;
686 685 } else {
687 686 err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
688 687 if (err != 0) {
689 688 return (EFAULT);
690 689 }
691 690 key = (uint64_t)params.xsvc_mem_reqid;
692 691 }
693 692
694 693 /* find the memory */
695 694 mp = xsvc_mem_lookup(state, key);
696 695 if (mp == NULL) {
697 696 return (EINVAL);
698 697 }
699 698
700 699 xsvc_mem_free(state, mp);
701 700
702 701 return (0);
703 702 }
704 703
705 704 /*
706 705 * xsvc_mem_alloc()
707 706 *
708 707 */
709 708 static int
710 709 xsvc_mem_alloc(xsvc_state_t *state, uint64_t key, xsvc_mem_t **mp)
711 710 {
712 711 xsvc_mem_t *mem;
713 712
714 713 mem = xsvc_mem_lookup(state, key);
715 714 if (mem != NULL) {
716 715 xsvc_mem_free(state, mem);
717 716 }
718 717
719 718 *mp = kmem_alloc(sizeof (xsvc_mem_t), KM_SLEEP);
720 719 (*mp)->xm_mnode.mn_home = *mp;
721 720 (*mp)->xm_mnode.mn_key = key;
722 721
723 722 mutex_enter(&state->xs_mlist.ml_mutex);
724 723 avl_add(&state->xs_mlist.ml_avl, &(*mp)->xm_mnode);
725 724 mutex_exit(&state->xs_mlist.ml_mutex);
726 725
727 726 return (0);
728 727 }
729 728
730 729 /*
731 730 * xsvc_mem_free()
732 731 *
733 732 */
734 733 static void
735 734 xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp)
736 735 {
737 736 if (mp->xm_dma_handle != NULL) {
738 737 (void) ddi_dma_unbind_handle(mp->xm_dma_handle);
739 738 ddi_dma_mem_free(&mp->xm_mem_handle);
740 739 ddi_dma_free_handle(&mp->xm_dma_handle);
741 740
742 741 mutex_enter(&state->xs_mutex);
743 742 state->xs_currently_alloced = state->xs_currently_alloced -
744 743 mp->xm_size;
745 744 mutex_exit(&state->xs_mutex);
746 745 }
747 746
748 747 mutex_enter(&state->xs_mlist.ml_mutex);
749 748 avl_remove(&state->xs_mlist.ml_avl, &mp->xm_mnode);
750 749 mutex_exit(&state->xs_mlist.ml_mutex);
751 750
752 751 kmem_free(mp, sizeof (*mp));
753 752 }
754 753
755 754 /*
756 755 * xsvc_mem_lookup()
757 756 *
758 757 */
759 758 static xsvc_mem_t *
760 759 xsvc_mem_lookup(xsvc_state_t *state, uint64_t key)
761 760 {
762 761 xsvc_mnode_t mnode;
763 762 xsvc_mnode_t *mnp;
764 763 avl_index_t where;
765 764 xsvc_mem_t *mp;
766 765
767 766 mnode.mn_key = key;
768 767 mutex_enter(&state->xs_mlist.ml_mutex);
769 768 mnp = avl_find(&state->xs_mlist.ml_avl, &mnode, &where);
770 769 mutex_exit(&state->xs_mlist.ml_mutex);
771 770
772 771 if (mnp != NULL) {
773 772 mp = mnp->mn_home;
774 773 } else {
775 774 mp = NULL;
776 775 }
777 776
778 777 return (mp);
779 778 }
780 779
781 780 /*
782 781 * xsvc_mnode_key_compare()
783 782 *
784 783 */
785 784 static int
786 785 xsvc_mnode_key_compare(const void *q, const void *e)
787 786 {
788 787 xsvc_mnode_t *n1;
789 788 xsvc_mnode_t *n2;
790 789
791 790 n1 = (xsvc_mnode_t *)q;
792 791 n2 = (xsvc_mnode_t *)e;
793 792
794 793 if (n1->mn_key < n2->mn_key) {
795 794 return (-1);
796 795 } else if (n1->mn_key > n2->mn_key) {
797 796 return (1);
798 797 } else {
799 798 return (0);
800 799 }
801 800 }
802 801
803 802 /*
804 803 * xsvc_devmap()
805 804 *
806 805 */
807 806 /*ARGSUSED*/
808 807 static int
809 808 xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
810 809 size_t *maplen, uint_t model)
811 810 {
812 811 ddi_umem_cookie_t cookie;
813 812 xsvc_state_t *state;
814 813 offset_t off_align;
815 814 size_t npages;
816 815 caddr_t kvai;
817 816 size_t psize;
818 817 int instance;
819 818 caddr_t kva;
820 819 pfn_t pfn;
821 820 int err;
822 821 int i;
823 822
824 823
825 824 instance = getminor(dev);
826 825 state = ddi_get_soft_state(xsvc_statep, instance);
827 826 if (state == NULL) {
828 827 return (ENXIO);
829 828 }
830 829
831 830 /*
832 831 * On 64-bit kernels, if we have a 32-bit application doing an mmap(),
833 832 * smmap32 will sign extend the offset. We need to undo that since
834 833 * we are passed a physical address in off, not an offset.
835 834 */
836 835 #if defined(__amd64)
837 836 if (((model & DDI_MODEL_MASK) == DDI_MODEL_ILP32) &&
838 837 ((off & ~0xFFFFFFFFll) == ~0xFFFFFFFFll)) {
839 838 off = off & 0xFFFFFFFF;
840 839 }
841 840 #endif
842 841
843 842 #ifdef __xpv
844 843 /*
845 844 * we won't allow guest OSes to devmap mfn/pfns. Maybe we'll relax
846 845 * this some later when there is a good reason.
847 846 */
848 847 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
849 848 return (-1);
850 849 }
851 850
852 851 /* we will always treat this as a foreign MFN */
853 852 pfn = xen_assign_pfn(btop(off));
854 853 #else
855 854 pfn = btop(off);
856 855 #endif
857 856 /* always work with whole pages */
858 857
859 858 off_align = P2ALIGN(off, PAGESIZE);
860 859 psize = P2ROUNDUP(off + len, PAGESIZE) - off_align;
861 860
862 861 /*
863 862 * if this is memory we're trying to map into user space, we first
864 863 * need to map the PFNs into KVA, then build up a umem cookie, and
865 864 * finally do a umem_setup to map it in.
866 865 */
867 866 if (pf_is_memory(pfn)) {
868 867 npages = btop(psize);
869 868
870 869 kva = vmem_alloc(heap_arena, psize, VM_SLEEP);
871 870 if (kva == NULL) {
872 871 return (-1);
873 872 }
874 873
875 874 kvai = kva;
876 875 for (i = 0; i < npages; i++) {
877 876 hat_devload(kas.a_hat, kvai, PAGESIZE, pfn,
878 877 PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
879 878 pfn++;
880 879 kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
881 880 }
882 881
883 882 err = xsvc_umem_cookie_alloc(kva, psize, KM_SLEEP, &cookie);
884 883 if (err != 0) {
885 884 goto devmapfail_cookie_alloc;
886 885 }
887 886
888 887 if ((err = devmap_umem_setup(dhp, state->xs_dip, &xsvc_callbk,
889 888 cookie, 0, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
890 889 goto devmapfail_umem_setup;
891 890 }
892 891 *maplen = psize;
893 892
894 893 /*
895 894 * If this is not memory (or a foreign MFN in i86xpv), go through
896 895 * devmem_setup.
897 896 */
898 897 } else {
899 898 if ((err = devmap_devmem_setup(dhp, state->xs_dip, NULL, 0,
900 899 off_align, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
901 900 return (err);
902 901 }
903 902 *maplen = psize;
904 903 }
905 904
906 905 return (0);
907 906
908 907 devmapfail_umem_setup:
909 908 xsvc_umem_cookie_free(&cookie);
910 909
911 910 devmapfail_cookie_alloc:
912 911 kvai = kva;
913 912 for (i = 0; i < npages; i++) {
914 913 hat_unload(kas.a_hat, kvai, PAGESIZE,
915 914 HAT_UNLOAD_UNLOCK);
916 915 kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
917 916 }
918 917 vmem_free(heap_arena, kva, psize);
919 918
920 919 return (err);
921 920 }
922 921
923 922 /*
924 923 * xsvc_umem_cookie_alloc()
925 924 *
926 925 * allocate a umem cookie to be used in devmap_umem_setup using KVA already
927 926 * allocated.
928 927 */
929 928 int
930 929 xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
931 930 ddi_umem_cookie_t *cookiep)
932 931 {
933 932 struct ddi_umem_cookie *umem_cookiep;
934 933
935 934 umem_cookiep = kmem_zalloc(sizeof (struct ddi_umem_cookie), flags);
936 935 if (umem_cookiep == NULL) {
937 936 *cookiep = NULL;
938 937 return (-1);
939 938 }
940 939
941 940 umem_cookiep->cvaddr = kva;
942 941 umem_cookiep->type = KMEM_NON_PAGEABLE;
943 942 umem_cookiep->size = size;
944 943 *cookiep = (ddi_umem_cookie_t *)umem_cookiep;
945 944
946 945 return (0);
947 946 }
948 947
949 948 /*
950 949 * xsvc_umem_cookie_free()
951 950 *
952 951 */
953 952 static void
954 953 xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep)
955 954 {
956 955 kmem_free(*cookiep, sizeof (struct ddi_umem_cookie));
957 956 *cookiep = NULL;
958 957 }
959 958
960 959
961 960 /*
962 961 * xsvc_devmap_map()
963 962 *
964 963 */
965 964 /*ARGSUSED*/
966 965 static int
967 966 xsvc_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags, offset_t off,
968 967 size_t len, void **pvtp)
969 968 {
970 969 struct ddi_umem_cookie *cp;
971 970 devmap_handle_t *dhp;
972 971 xsvc_state_t *state;
973 972 int instance;
974 973
975 974
976 975 instance = getminor(dev);
977 976 state = ddi_get_soft_state(xsvc_statep, instance);
978 977 if (state == NULL) {
979 978 return (ENXIO);
980 979 }
981 980
982 981 dhp = (devmap_handle_t *)dhc;
983 982 /* This driver only supports MAP_SHARED, not MAP_PRIVATE */
984 983 if (flags & MAP_PRIVATE) {
985 984 cmn_err(CE_WARN, "!xsvc driver doesn't support MAP_PRIVATE");
986 985 return (EINVAL);
987 986 }
988 987
989 988 cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
990 989 cp->cook_refcnt = 1;
991 990
992 991 *pvtp = state;
993 992 return (0);
994 993 }
995 994
996 995
997 996 /*
998 997 * xsvc_devmap_dup()
999 998 *
1000 999 * keep a reference count for forks so we don't unmap if we have multiple
1001 1000 * mappings.
1002 1001 */
1003 1002 /*ARGSUSED*/
1004 1003 static int
1005 1004 xsvc_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhp,
1006 1005 void **new_pvtp)
1007 1006 {
1008 1007 struct ddi_umem_cookie *cp;
1009 1008 devmap_handle_t *dhp;
1010 1009 xsvc_state_t *state;
1011 1010
1012 1011
1013 1012 state = (xsvc_state_t *)pvtp;
1014 1013 dhp = (devmap_handle_t *)dhc;
1015 1014
1016 1015 mutex_enter(&state->xs_cookie_mutex);
1017 1016 cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
1018 1017 if (cp == NULL) {
1019 1018 mutex_exit(&state->xs_cookie_mutex);
1020 1019 return (ENOMEM);
1021 1020 }
1022 1021
1023 1022 cp->cook_refcnt++;
1024 1023 mutex_exit(&state->xs_cookie_mutex);
1025 1024
1026 1025 *new_pvtp = state;
1027 1026 return (0);
1028 1027 }
1029 1028
1030 1029
1031 1030 /*
1032 1031 * xsvc_devmap_unmap()
1033 1032 *
1034 1033 * This routine is only called if we were mapping in memory in xsvc_devmap().
1035 1034 * i.e. we only pass in xsvc_callbk to devmap_umem_setup if pf_is_memory()
1036 1035 * was true. It would have been nice if devmap_callback_ctl had an args param.
1037 1036 * We wouldn't have had to look into the devmap_handle and into the umem
1038 1037 * cookie.
1039 1038 */
1040 1039 /*ARGSUSED*/
1041 1040 static void
1042 1041 xsvc_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
1043 1042 devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
1044 1043 void **new_pvtp2)
1045 1044 {
1046 1045 struct ddi_umem_cookie *ncp;
1047 1046 struct ddi_umem_cookie *cp;
1048 1047 devmap_handle_t *ndhp;
1049 1048 devmap_handle_t *dhp;
1050 1049 xsvc_state_t *state;
1051 1050 size_t npages;
1052 1051 caddr_t kvai;
1053 1052 caddr_t kva;
1054 1053 size_t size;
1055 1054 int i;
1056 1055
1057 1056
1058 1057 state = (xsvc_state_t *)pvtp;
1059 1058 mutex_enter(&state->xs_cookie_mutex);
1060 1059
1061 1060 /* peek into the umem cookie to figure out what we need to free up */
1062 1061 dhp = (devmap_handle_t *)dhc;
1063 1062 cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
1064 1063 ASSERT(cp != NULL);
1065 1064
1066 1065 if (new_dhp1 != NULL) {
1067 1066 ndhp = (devmap_handle_t *)new_dhp1;
1068 1067 ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
1069 1068 ncp->cook_refcnt++;
1070 1069 *new_pvtp1 = state;
1071 1070 }
1072 1071 if (new_dhp2 != NULL) {
1073 1072 ndhp = (devmap_handle_t *)new_dhp2;
1074 1073 ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
1075 1074 ncp->cook_refcnt++;
1076 1075 *new_pvtp2 = state;
1077 1076 }
1078 1077
1079 1078 cp->cook_refcnt--;
1080 1079 if (cp->cook_refcnt == 0) {
1081 1080 kva = cp->cvaddr;
1082 1081 size = cp->size;
1083 1082
1084 1083 /*
1085 1084 * free up the umem cookie, then unmap all the pages that we
1086 1085 * mapped in during devmap, then free up the kva space.
1087 1086 */
1088 1087 npages = btop(size);
1089 1088 xsvc_umem_cookie_free(&dhp->dh_cookie);
1090 1089 kvai = kva;
1091 1090 for (i = 0; i < npages; i++) {
1092 1091 hat_unload(kas.a_hat, kvai, PAGESIZE,
1093 1092 HAT_UNLOAD_UNLOCK);
1094 1093 kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
1095 1094 }
1096 1095 vmem_free(heap_arena, kva, size);
1097 1096 }
1098 1097
1099 1098 mutex_exit(&state->xs_cookie_mutex);
1100 1099 }
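To see the warning that motivates this change outside the kernel build, a hypothetical stand-alone reproduction (the exact diagnostic text varies by compiler version):

    /* braces.c -- stand-in struct, same shape as the modlinkage case */
    struct modlinkage { int ml_rev; void *ml_linkage[7]; };
    struct modlinkage m  = { 1, (void *)0, 0 };      /* warns: missing braces */
    struct modlinkage ok = { 1, { (void *)0, 0 } };  /* fully bracketed: clean */

Compiling with "gcc -c -Wmissing-braces braces.c" flags the first initializer and accepts the second; once every such initializer in the tree is fully bracketed, -Wno-missing-braces can be dropped from Makefile.uts.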