/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * dcopy.c
 *    dcopy misc module
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>


#include <sys/dcopy.h>
#include <sys/dcopy_device.h>


/* Number of entries per channel to allocate */
uint_t dcopy_channel_size = 1024;


typedef struct dcopy_list_s {
    list_t dl_list;
    kmutex_t dl_mutex;
    uint_t dl_cnt; /* num entries on list */
} dcopy_list_t;

/* device state for register/unregister */
struct dcopy_device_s {
    /* DMA device driver's private pointer */
    void *dc_device_private;

    /* to track list of channels from this DMA device */
    dcopy_list_t dc_devchan_list;
    list_node_t dc_device_list_node;

    /*
     * dc_removing_cnt tracks how many channels still have to be freed up
     * before it's safe to allow the DMA device driver to detach.
     */
    uint_t dc_removing_cnt;
    dcopy_device_cb_t *dc_cb;

    dcopy_device_info_t dc_info;

};

typedef struct dcopy_stats_s {
    kstat_named_t cs_bytes_xfer;
    kstat_named_t cs_cmd_alloc;
    kstat_named_t cs_cmd_post;
    kstat_named_t cs_cmd_poll;
    kstat_named_t cs_notify_poll;
    kstat_named_t cs_notify_pending;
    kstat_named_t cs_id;
    kstat_named_t cs_capabilities;
} dcopy_stats_t;

/* DMA channel state */
struct dcopy_channel_s {
    /* DMA driver's channel private pointer */
    void *ch_channel_private;

    /* shortcut to device callbacks */
    dcopy_device_cb_t *ch_cb;

    /*
     * number of outstanding allocs for this channel. Used to track when
     * it's safe to free up this channel so the DMA device driver can
     * detach.
     */
    uint64_t ch_ref_cnt;

    /* set when the channel needs to be removed once ch_ref_cnt reaches 0 */
    boolean_t ch_removing;

    list_node_t ch_devchan_list_node;
    list_node_t ch_globalchan_list_node;

    /*
     * per-channel list of commands actively blocking, waiting for
     * completion.
     */
    dcopy_list_t ch_poll_list;

    /* pointer back to our device */
    struct dcopy_device_s *ch_device;

    dcopy_query_channel_t ch_info;

    kstat_t *ch_kstat;
    dcopy_stats_t ch_stat;
};

/*
 * If grabbing both the device_list mutex and the globalchan_list mutex,
 * always grab the globalchan_list mutex before the device_list mutex.
 */
typedef struct dcopy_state_s {
    dcopy_list_t d_device_list;
    dcopy_list_t d_globalchan_list;
} dcopy_state_t;
dcopy_state_t *dcopy_statep;


/* Module Driver Info */
static struct modlmisc dcopy_modlmisc = {
    &mod_miscops,
    "dcopy kernel module"
};

/* Module Linkage */
static struct modlinkage dcopy_modlinkage = {
    MODREV_1,
    { &dcopy_modlmisc, NULL }
};

static int dcopy_init();
static void dcopy_fini();

static int dcopy_list_init(dcopy_list_t *list, size_t node_size,
    offset_t link_offset);
static void dcopy_list_fini(dcopy_list_t *list);
static void dcopy_list_push(dcopy_list_t *list, void *list_node);
static void *dcopy_list_pop(dcopy_list_t *list);

static void dcopy_device_cleanup(dcopy_device_handle_t device,
    boolean_t do_callback);

static int dcopy_stats_init(dcopy_handle_t channel);
static void dcopy_stats_fini(dcopy_handle_t channel);


/*
 * _init()
 */
int
_init()
{
    int e;

    e = dcopy_init();
    if (e != 0) {
        return (e);
    }

    return (mod_install(&dcopy_modlinkage));
}


/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
    return (mod_info(&dcopy_modlinkage, modinfop));
}


/*
 * _fini()
 */
int
_fini()
{
    int e;

    e = mod_remove(&dcopy_modlinkage);
    if (e != 0) {
        return (e);
    }

    dcopy_fini();

    return (e);
}

/*
 * dcopy_init()
 */
static int
dcopy_init()
{
    int e;


    dcopy_statep = kmem_zalloc(sizeof (*dcopy_statep), KM_SLEEP);

    /* Initialize the list we use to track device register/unregister */
    e = dcopy_list_init(&dcopy_statep->d_device_list,
        sizeof (struct dcopy_device_s),
        offsetof(struct dcopy_device_s, dc_device_list_node));
    if (e != DCOPY_SUCCESS) {
        goto dcopyinitfail_device;
    }

    /* Initialize the list we use to track all DMA channels */
    e = dcopy_list_init(&dcopy_statep->d_globalchan_list,
        sizeof (struct dcopy_channel_s),
        offsetof(struct dcopy_channel_s, ch_globalchan_list_node));
    if (e != DCOPY_SUCCESS) {
        goto dcopyinitfail_global;
    }

    return (0);

dcopyinitfail_cback:
    dcopy_list_fini(&dcopy_statep->d_globalchan_list);
dcopyinitfail_global:
    dcopy_list_fini(&dcopy_statep->d_device_list);
dcopyinitfail_device:
    kmem_free(dcopy_statep, sizeof (*dcopy_statep));

    return (-1);
}


/*
 * dcopy_fini()
 */
static void
dcopy_fini()
{
    /*
     * if mod_remove() was successful, we shouldn't have any
     * devices/channels to worry about.
     */
    ASSERT(list_head(&dcopy_statep->d_globalchan_list.dl_list) == NULL);
    ASSERT(list_head(&dcopy_statep->d_device_list.dl_list) == NULL);

    dcopy_list_fini(&dcopy_statep->d_globalchan_list);
    dcopy_list_fini(&dcopy_statep->d_device_list);
    kmem_free(dcopy_statep, sizeof (*dcopy_statep));
}


/* *** EXTERNAL INTERFACE *** */
/*
 * dcopy_query()
 */
void
dcopy_query(dcopy_query_t *query)
{
    query->dq_version = DCOPY_QUERY_V0;
    query->dq_num_channels = dcopy_statep->d_globalchan_list.dl_cnt;
}


/*
 * dcopy_alloc()
 */
/*ARGSUSED*/
int
dcopy_alloc(int flags, dcopy_handle_t *handle)
{
    dcopy_handle_t channel;
    dcopy_list_t *list;


    /*
     * we don't use the dcopy_list_* code here because we need to do
     * some non-standard stuff.
     */

    list = &dcopy_statep->d_globalchan_list;

    /*
     * if nothing is on the channel list, return DCOPY_NORESOURCES. This
     * can happen if there aren't any DMA devices registered.
     */
    mutex_enter(&list->dl_mutex);
    channel = list_head(&list->dl_list);
    if (channel == NULL) {
        mutex_exit(&list->dl_mutex);
        return (DCOPY_NORESOURCES);
    }

    /*
     * increment the reference count, then pop the channel off the head
     * and push it on the tail. This ensures we rotate through the
     * channels. DMA channels are shared.
     */
    channel->ch_ref_cnt++;
    list_remove(&list->dl_list, channel);
    list_insert_tail(&list->dl_list, channel);
    mutex_exit(&list->dl_mutex);

    *handle = (dcopy_handle_t)channel;
    return (DCOPY_SUCCESS);
}


/*
 * dcopy_free()
 */
void
dcopy_free(dcopy_handle_t *channel)
{
    dcopy_device_handle_t device;
    dcopy_list_t *list;
    boolean_t cleanup = B_FALSE;


    ASSERT(*channel != NULL);

    /*
     * we don't need to add the channel back to the list since we never
     * removed it. decrement the reference count.
     */
    list = &dcopy_statep->d_globalchan_list;
    mutex_enter(&list->dl_mutex);
    (*channel)->ch_ref_cnt--;

    /*
     * if we need to remove this channel, and the reference count is down
     * to 0, decrement the number of channels which still need to be
     * removed on the device.
     */
    if ((*channel)->ch_removing && ((*channel)->ch_ref_cnt == 0)) {
        device = (*channel)->ch_device;
        mutex_enter(&device->dc_devchan_list.dl_mutex);
        device->dc_removing_cnt--;
        if (device->dc_removing_cnt == 0) {
            cleanup = B_TRUE;
        }
        mutex_exit(&device->dc_devchan_list.dl_mutex);
    }
    mutex_exit(&list->dl_mutex);

    /*
     * if there are no channels which still need to be removed, clean up
     * the device state and call back into the DMA device driver to tell
     * it the device is free.
     */
    if (cleanup) {
        dcopy_device_cleanup(device, B_TRUE);
    }

    *channel = NULL;
}


/*
 * dcopy_query_channel()
 */
void
dcopy_query_channel(dcopy_handle_t channel, dcopy_query_channel_t *query)
{
    *query = channel->ch_info;
}


/*
 * dcopy_cmd_alloc()
 */
int
dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
{
    dcopy_handle_t channel;
    dcopy_cmd_priv_t priv;
    int e;


    channel = handle;

    atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);
    e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
        cmd);
    if (e == DCOPY_SUCCESS) {
        priv = (*cmd)->dp_private;
        priv->pr_channel = channel;
        /*
         * we won't initialize the blocking state until we actually
         * need to block.
         */
        priv->pr_block_init = B_FALSE;
    }

    return (e);
}


/*
 * dcopy_cmd_free()
 */
void
dcopy_cmd_free(dcopy_cmd_t *cmd)
{
    dcopy_handle_t channel;
    dcopy_cmd_priv_t priv;


    ASSERT(*cmd != NULL);

    priv = (*cmd)->dp_private;
    channel = priv->pr_channel;

    /* if we initialized the blocking state, clean it up too */
    if (priv->pr_block_init) {
        cv_destroy(&priv->pr_cv);
        mutex_destroy(&priv->pr_mutex);
    }

    channel->ch_cb->cb_cmd_free(channel->ch_channel_private, cmd);
}


/*
 * dcopy_cmd_post()
 */
int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
    dcopy_handle_t channel;
    int e;


    channel = cmd->dp_private->pr_channel;

    atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
    if (cmd->dp_cmd == DCOPY_CMD_COPY) {
        atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
            cmd->dp.copy.cc_size);
    }
    e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
    if (e != DCOPY_SUCCESS) {
        return (e);
    }

    return (DCOPY_SUCCESS);
}


/*
 * dcopy_cmd_poll()
 */
int
dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
{
    dcopy_handle_t channel;
    dcopy_cmd_priv_t priv;
    int e;


    priv = cmd->dp_private;
    channel = priv->pr_channel;

    /*
     * if the caller is trying to block, they need to have posted the
     * command with DCOPY_CMD_INTR set.
     */
    if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
        return (DCOPY_FAILURE);
    }

    atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);

repoll:
    e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
    if (e == DCOPY_PENDING) {
        /*
         * the command is still active; wait for it if the blocking
         * flag is set.
         */
        if (flags & DCOPY_POLL_BLOCK) {

            /*
             * if we haven't initialized the state, do it now. A
             * command can be re-used, so it's possible it's
             * already been initialized.
             */
            if (!priv->pr_block_init) {
                priv->pr_block_init = B_TRUE;
                mutex_init(&priv->pr_mutex, NULL, MUTEX_DRIVER,
                    NULL);
                cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
                priv->pr_cmd = cmd;
            }

            /* push it on the list for blocking commands */
            priv->pr_wait = B_TRUE;
            dcopy_list_push(&channel->ch_poll_list, priv);

            mutex_enter(&priv->pr_mutex);
            /*
             * it's possible we already cleared pr_wait before we
             * grabbed the mutex.
             */
            if (priv->pr_wait) {
                cv_wait(&priv->pr_cv, &priv->pr_mutex);
            }
            mutex_exit(&priv->pr_mutex);

            /*
             * the command has completed, go back and poll so we
             * get the status.
             */
            goto repoll;
        }
    }

    return (e);
}

/* *** END OF EXTERNAL INTERFACE *** */
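
/*
 * Illustrative consumer flow (editorial sketch, not part of this module):
 * a kernel consumer allocates a shared channel, allocates and posts a copy
 * command, then polls for completion. Only dp_cmd, dp_flags and
 * dp.copy.cc_size are referenced by this file itself; the source and
 * destination fields shown below (cc_source, cc_dest) are assumed names
 * from dcopy.h, and src_pa, dst_pa and size are placeholder variables.
 *
 *	dcopy_handle_t chan;
 *	dcopy_cmd_t cmd;
 *
 *	if (dcopy_alloc(DCOPY_SLEEP, &chan) != DCOPY_SUCCESS)
 *		return (DCOPY_NORESOURCES);
 *	if (dcopy_cmd_alloc(chan, DCOPY_SLEEP, &cmd) != DCOPY_SUCCESS) {
 *		dcopy_free(&chan);
 *		return (DCOPY_FAILURE);
 *	}
 *	cmd->dp_cmd = DCOPY_CMD_COPY;
 *	cmd->dp_flags = DCOPY_CMD_INTR;		(needed for DCOPY_POLL_BLOCK)
 *	cmd->dp.copy.cc_size = size;
 *	cmd->dp.copy.cc_source = src_pa;	(assumed field name)
 *	cmd->dp.copy.cc_dest = dst_pa;		(assumed field name)
 *	if (dcopy_cmd_post(cmd) == DCOPY_SUCCESS)
 *		(void) dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
 *	dcopy_cmd_free(&cmd);
 *	dcopy_free(&chan);
 */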

/*
 * dcopy_list_init()
 */
static int
dcopy_list_init(dcopy_list_t *list, size_t node_size, offset_t link_offset)
{
    mutex_init(&list->dl_mutex, NULL, MUTEX_DRIVER, NULL);
    list_create(&list->dl_list, node_size, link_offset);
    list->dl_cnt = 0;

    return (DCOPY_SUCCESS);
}


/*
 * dcopy_list_fini()
 */
static void
dcopy_list_fini(dcopy_list_t *list)
{
    list_destroy(&list->dl_list);
    mutex_destroy(&list->dl_mutex);
}


/*
 * dcopy_list_push()
 */
static void
dcopy_list_push(dcopy_list_t *list, void *list_node)
{
    mutex_enter(&list->dl_mutex);
    list_insert_tail(&list->dl_list, list_node);
    list->dl_cnt++;
    mutex_exit(&list->dl_mutex);
}


/*
 * dcopy_list_pop()
 */
static void *
dcopy_list_pop(dcopy_list_t *list)
{
    list_node_t *list_node;

    mutex_enter(&list->dl_mutex);
    list_node = list_head(&list->dl_list);
    if (list_node == NULL) {
        mutex_exit(&list->dl_mutex);
        return (list_node);
    }
    list->dl_cnt--;
    list_remove(&list->dl_list, list_node);
    mutex_exit(&list->dl_mutex);

    return (list_node);
}


/* *** DEVICE INTERFACE *** */
/*
 * dcopy_device_register()
 */
int
dcopy_device_register(void *device_private, dcopy_device_info_t *info,
    dcopy_device_handle_t *handle)
{
    struct dcopy_channel_s *channel;
    struct dcopy_device_s *device;
    int e;
    int i;


    /* initialize the per-device state */
    device = kmem_zalloc(sizeof (*device), KM_SLEEP);
    device->dc_device_private = device_private;
    device->dc_info = *info;
    device->dc_removing_cnt = 0;
    device->dc_cb = info->di_cb;

    /*
     * we have a per-device channel list so we can remove a device in the
     * future.
     */
    e = dcopy_list_init(&device->dc_devchan_list,
        sizeof (struct dcopy_channel_s),
        offsetof(struct dcopy_channel_s, ch_devchan_list_node));
    if (e != DCOPY_SUCCESS) {
        goto registerfail_devchan;
    }

    /*
     * allocate state for each channel, allocate the channel, and then add
     * the device's DMA channels to the device's channel list.
     */
    for (i = 0; i < info->di_num_dma; i++) {
        channel = kmem_zalloc(sizeof (*channel), KM_SLEEP);
        channel->ch_device = device;
        channel->ch_removing = B_FALSE;
        channel->ch_ref_cnt = 0;
        channel->ch_cb = info->di_cb;

        e = info->di_cb->cb_channel_alloc(device_private, channel,
            DCOPY_SLEEP, dcopy_channel_size, &channel->ch_info,
            &channel->ch_channel_private);
        if (e != DCOPY_SUCCESS) {
            kmem_free(channel, sizeof (*channel));
            goto registerfail_alloc;
        }

        e = dcopy_stats_init(channel);
        if (e != DCOPY_SUCCESS) {
            info->di_cb->cb_channel_free(
                &channel->ch_channel_private);
            kmem_free(channel, sizeof (*channel));
            goto registerfail_alloc;
        }

        e = dcopy_list_init(&channel->ch_poll_list,
            sizeof (struct dcopy_cmd_priv_s),
            offsetof(struct dcopy_cmd_priv_s, pr_poll_list_node));
        if (e != DCOPY_SUCCESS) {
            dcopy_stats_fini(channel);
            info->di_cb->cb_channel_free(
                &channel->ch_channel_private);
            kmem_free(channel, sizeof (*channel));
            goto registerfail_alloc;
        }

        dcopy_list_push(&device->dc_devchan_list, channel);
    }

    /* add the device to the device list */
    dcopy_list_push(&dcopy_statep->d_device_list, device);

    /*
     * add the device's DMA channels to the global channel list (where
     * dcopy_alloc()s come from)
     */
    mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
    mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
    channel = list_head(&device->dc_devchan_list.dl_list);
    while (channel != NULL) {
        list_insert_tail(&dcopy_statep->d_globalchan_list.dl_list,
            channel);
        dcopy_statep->d_globalchan_list.dl_cnt++;
        channel = list_next(&device->dc_devchan_list.dl_list, channel);
    }
    mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
    mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

    *handle = device;

    /* last, call back into the kernel to enable the dcopy KAPI */
    uioa_dcopy_enable();

    return (DCOPY_SUCCESS);

registerfail_alloc:
    /* pop each channel off the list and free it */
    while ((channel = dcopy_list_pop(&device->dc_devchan_list)) != NULL) {
        dcopy_list_fini(&channel->ch_poll_list);
        dcopy_stats_fini(channel);
        info->di_cb->cb_channel_free(&channel->ch_channel_private);
        kmem_free(channel, sizeof (*channel));
    }

    dcopy_list_fini(&device->dc_devchan_list);
registerfail_devchan:
    kmem_free(device, sizeof (*device));

    return (DCOPY_FAILURE);
}
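
/*
 * Illustrative provider flow (editorial sketch, not part of this module):
 * a DMA device driver fills in a dcopy_device_info_t (at minimum di_dip,
 * di_num_dma and di_cb, the callback vector holding cb_channel_alloc,
 * cb_channel_free, cb_cmd_alloc, cb_cmd_free, cb_cmd_post, cb_cmd_poll and
 * cb_unregister_complete; any other fields are not used by this file) and
 * registers from its attach(9E) routine:
 *
 *	if (dcopy_device_register(drv_state, &info, &drv_dcopy_handle) !=
 *	    DCOPY_SUCCESS)
 *		(fail the attach)
 *
 * "drv_state", "info" and "drv_dcopy_handle" are placeholder names. On
 * detach(9E), dcopy_device_unregister() below either succeeds immediately
 * or returns DCOPY_PENDING when channels still have outstanding allocs; in
 * the latter case the final dcopy_free() triggers dcopy_device_cleanup(),
 * which calls cb_unregister_complete().
 */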

/*
 * dcopy_device_unregister()
 */
/*ARGSUSED*/
int
dcopy_device_unregister(dcopy_device_handle_t *handle)
{
    struct dcopy_channel_s *channel;
    dcopy_device_handle_t device;
    boolean_t device_busy;

    /* first, call back into the kernel to disable the dcopy KAPI */
    uioa_dcopy_disable();

    device = *handle;
    device_busy = B_FALSE;

    /*
     * remove the device's DMA channels from the global channel list (where
     * dcopy_alloc()s come from)
     */
    mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
    mutex_enter(&device->dc_devchan_list.dl_mutex);
    channel = list_head(&device->dc_devchan_list.dl_list);
    while (channel != NULL) {
        /*
         * if the channel has outstanding allocs, mark it as having
         * to be removed and increment the number of channels which
         * need to be removed in the device state too.
         */
        if (channel->ch_ref_cnt != 0) {
            channel->ch_removing = B_TRUE;
            device_busy = B_TRUE;
            device->dc_removing_cnt++;
        }
        dcopy_statep->d_globalchan_list.dl_cnt--;
        list_remove(&dcopy_statep->d_globalchan_list.dl_list, channel);
        channel = list_next(&device->dc_devchan_list.dl_list, channel);
    }
    mutex_exit(&device->dc_devchan_list.dl_mutex);
    mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

    /*
     * if there are channels which still need to be removed, we will clean
     * up the device state after they are freed up.
     */
    if (device_busy) {
        return (DCOPY_PENDING);
    }

    dcopy_device_cleanup(device, B_FALSE);

    *handle = NULL;
    return (DCOPY_SUCCESS);
}


/*
 * dcopy_device_cleanup()
 */
static void
dcopy_device_cleanup(dcopy_device_handle_t device, boolean_t do_callback)
{
    struct dcopy_channel_s *channel;

    /*
     * remove all the channels in the device list, free them, and clean up
     * the state.
     */
    mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
    channel = list_head(&device->dc_devchan_list.dl_list);
    while (channel != NULL) {
        device->dc_devchan_list.dl_cnt--;
        list_remove(&device->dc_devchan_list.dl_list, channel);
        dcopy_list_fini(&channel->ch_poll_list);
        dcopy_stats_fini(channel);
        channel->ch_cb->cb_channel_free(&channel->ch_channel_private);
        kmem_free(channel, sizeof (*channel));
        channel = list_head(&device->dc_devchan_list.dl_list);
    }

    /* remove it from the list of devices */
    list_remove(&dcopy_statep->d_device_list.dl_list, device);

    mutex_exit(&dcopy_statep->d_device_list.dl_mutex);

    /*
     * notify the DMA device driver that the device is free to be
     * detached.
     */
    if (do_callback) {
        device->dc_cb->cb_unregister_complete(
            device->dc_device_private, DCOPY_SUCCESS);
    }

    dcopy_list_fini(&device->dc_devchan_list);
    kmem_free(device, sizeof (*device));
}


/*
 * dcopy_device_channel_notify()
 */
/*ARGSUSED*/
void
dcopy_device_channel_notify(dcopy_handle_t handle, int status)
{
    struct dcopy_channel_s *channel;
    dcopy_list_t *poll_list;
    dcopy_cmd_priv_t priv;
    int e;


    ASSERT(status == DCOPY_COMPLETION);
    channel = handle;

    poll_list = &channel->ch_poll_list;

    /*
     * when we get a completion notification from the device, go through
     * all of the commands blocking on this channel and see if they have
     * completed. Remove each completed command and wake up the blocked
     * thread. Once we hit a command which is still pending, we are done
     * polling since commands in a channel complete in order.
     */
    mutex_enter(&poll_list->dl_mutex);
    if (poll_list->dl_cnt != 0) {
        priv = list_head(&poll_list->dl_list);
        while (priv != NULL) {
            atomic_inc_64(&channel->
                ch_stat.cs_notify_poll.value.ui64);
            e = channel->ch_cb->cb_cmd_poll(
                channel->ch_channel_private,
                priv->pr_cmd);
            if (e == DCOPY_PENDING) {
                atomic_inc_64(&channel->
                    ch_stat.cs_notify_pending.value.ui64);
                break;
            }

            poll_list->dl_cnt--;
            list_remove(&poll_list->dl_list, priv);

            mutex_enter(&priv->pr_mutex);
            priv->pr_wait = B_FALSE;
            cv_signal(&priv->pr_cv);
            mutex_exit(&priv->pr_mutex);

            priv = list_head(&poll_list->dl_list);
        }
    }

    mutex_exit(&poll_list->dl_mutex);
}


/*
 * dcopy_stats_init()
 */
static int
dcopy_stats_init(dcopy_handle_t channel)
{
#define	CHANSTRSIZE	20
    char chanstr[CHANSTRSIZE];
    dcopy_stats_t *stats;
    int instance;
    char *name;


    stats = &channel->ch_stat;
    name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
    instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);

    (void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
        (uint32_t)channel->ch_info.qc_chan_num);

    channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
        KSTAT_TYPE_NAMED, sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL);
    if (channel->ch_kstat == NULL) {
        return (DCOPY_FAILURE);
    }
    channel->ch_kstat->ks_data = stats;

    kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_cmd_post, "cmd_post",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_notify_poll, "notify_poll",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_notify_pending, "notify_pending",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_id, "id",
        KSTAT_DATA_UINT64);
    kstat_named_init(&stats->cs_capabilities, "capabilities",
        KSTAT_DATA_UINT64);

    kstat_install(channel->ch_kstat);

    channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
    channel->ch_stat.cs_capabilities.value.ui64 =
        channel->ch_info.qc_capabilities;

    return (DCOPY_SUCCESS);
}


/*
 * dcopy_stats_fini()
 */
static void
dcopy_stats_fini(dcopy_handle_t channel)
{
    kstat_delete(channel->ch_kstat);
}
/* *** END OF DEVICE INTERFACE *** */
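
/*
 * Completion-notification path (editorial note, not part of this module):
 * a registered DMA device driver is expected to call
 * dcopy_device_channel_notify() when a channel carrying DCOPY_CMD_INTR
 * commands makes progress, e.g. from its interrupt handler:
 *
 *	dcopy_device_channel_notify(chan_handle, DCOPY_COMPLETION);
 *
 * This re-polls the commands blocked on that channel, in order, and wakes
 * each completed dcopy_cmd_poll(DCOPY_POLL_BLOCK) caller. "chan_handle" is
 * a placeholder for the dcopy_handle_t this framework passed to the
 * driver's cb_channel_alloc() callback.
 */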