/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/ddi.h>

#include <sys/ncall/ncall.h>

#define	__NSC_GEN__
#include "nsc_dev.h"

#ifdef DS_DDICT
#include "../contract.h"
#endif

#include "../nsctl.h"

#define	NSC_DEVMIN	"DevMin"
#define	NSC_DEVMAJ	"DevMaj"

#define	_I(x)	(((long)(&((nsc_io_t *)0)->x))/sizeof (long))
#define	_F(x)	(((long)(&((nsc_fd_t *)0)->x))/sizeof (long))


nsc_def_t _nsc_io_def[] = {
	{ "Open",	(uintptr_t)nsc_null,	_I(open)	},
	{ "Close",	(uintptr_t)nsc_null,	_I(close)	},
	{ "Attach",	(uintptr_t)nsc_null,	_I(attach)	},
	{ "Detach",	(uintptr_t)nsc_null,	_I(detach)	},
	{ "Flush",	(uintptr_t)nsc_null,	_I(flush)	},
	{ "Provide",	(uintptr_t)NULL,	_I(provide)	},
	{ NULL,		(uintptr_t)NULL,	0		}
};

nsc_def_t _nsc_fd_def[] = {
	{ "Pinned",	(uintptr_t)nsc_null,	_F(sf_pinned)	},
	{ "Unpinned",	(uintptr_t)nsc_null,	_F(sf_unpinned)	},
	{ "Attach",	(uintptr_t)nsc_null,	_F(sf_attach)	},
	{ "Detach",	(uintptr_t)nsc_null,	_F(sf_detach)	},
	{ "Flush",	(uintptr_t)nsc_null,	_F(sf_flush)	},
	{ NULL,		(uintptr_t)NULL,	0		}
};

kmutex_t _nsc_io_lock;
kmutex_t _nsc_devval_lock;

nsc_io_t *_nsc_io_top = NULL;
nsc_io_t *_nsc_null_io = NULL;
nsc_dev_t *_nsc_dev_top = NULL;
nsc_dev_t *_nsc_dev_pend = NULL;
nsc_path_t *_nsc_path_top = NULL;
nsc_devval_t *_nsc_devval_top = NULL;

extern nsc_def_t _nsc_disk_def[];
extern nsc_def_t _nsc_cache_def[];

extern nsc_mem_t *_nsc_local_mem;
extern nsc_rmmap_t *_nsc_global_map;

static clock_t _nsc_io_lbolt;

static nsc_io_t *_nsc_find_io(char *, int, int *);
nsc_io_t *_nsc_reserve_io(char *, int);
static nsc_io_t *_nsc_alloc_io(int, char *, int);

static int _nsc_open_fn(nsc_fd_t *, int);
static int _nsc_close_fn(nsc_fd_t *);
static int _nsc_alloc_fd(char *, int, int, nsc_fd_t **);
static int _nsc_alloc_iodev(nsc_dev_t *, int, nsc_iodev_t **);
static int _nsc_alloc_dev(char *, nsc_dev_t **);
static int _nsc_reopen_io(char *, int);
static int _nsc_reopen_dev(nsc_dev_t *, int);
static int _nsc_relock_dev(nsc_dev_t *, nsc_fd_t *, nsc_iodev_t *);
static int _nsc_reopen_fd(nsc_fd_t *, int);
static int _nsc_decode_io(nsc_def_t *, nsc_io_t *);

void _nsc_release_io(nsc_io_t *);
static void _nsc_free_fd(nsc_fd_t *);
static void _nsc_free_iodev(nsc_iodev_t *);
static void _nsc_free_dev(nsc_dev_t *);
static void _nsc_free_io(nsc_io_t *);
static void _nsc_relink_fd(nsc_fd_t *, nsc_fd_t **, nsc_fd_t **, nsc_iodev_t *);

static int _nsc_setval(nsc_dev_t *, char *, char *, int, int);
static void r_nsc_setval(ncall_t *, int *);
static void r_nsc_setval_all(ncall_t *, int *);

extern void _nsc_add_disk(nsc_io_t *);
extern void _nsc_add_cache(nsc_io_t *);


/*
 * void
 * _nsc_init_dev (void)
 *	Initialise device subsystem.
 *
 * Calling/Exit State:
 *	Called at driver initialisation time to allocate necessary
 *	data structures.
 */
void
_nsc_init_dev()
{
	mutex_init(&_nsc_io_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&_nsc_devval_lock, NULL, MUTEX_DRIVER, NULL);

	_nsc_null_io = nsc_register_io("null", NSC_NULL, (nsc_def_t *)0);

	if (!_nsc_null_io)
		cmn_err(CE_PANIC, "nsctl: nsc_init_dev");

	ncall_register_svc(NSC_SETVAL_ALL, r_nsc_setval_all);
	ncall_register_svc(NSC_SETVAL, r_nsc_setval);
}


/*
 * void
 * _nsc_deinit_dev (void)
 *	De-initialise device subsystem.
 *
 * Calling/Exit State:
 *	Called at driver unload time to free the device/value
 *	headers and to release the ncall services and locks
 *	allocated by _nsc_init_dev.
 */
void
_nsc_deinit_dev()
{
	nsc_devval_t *dv;
	nsc_val_t *vp;

	mutex_enter(&_nsc_devval_lock);

	while ((dv = _nsc_devval_top) != NULL) {
		while ((vp = dv->dv_values) != NULL) {
			dv->dv_values = vp->sv_next;
			nsc_kmem_free(vp, sizeof (*vp));
		}

		_nsc_devval_top = dv->dv_next;
		nsc_kmem_free(dv, sizeof (*dv));
	}

	mutex_exit(&_nsc_devval_lock);

	ncall_unregister_svc(NSC_SETVAL_ALL);
	ncall_unregister_svc(NSC_SETVAL);

	mutex_destroy(&_nsc_devval_lock);
	mutex_destroy(&_nsc_io_lock);
}


/*
 * nsc_io_t *
 * nsc_register_io (char *name, int type, nsc_def_t *def)
 *	Register an I/O module.
 *
 * Calling/Exit State:
 *	Returns a token for use in future calls to nsc_unregister_io.
 *	The ID and flags for the module are specified by 'type' and
 *	the appropriate entry points are defined using 'def'.  If
 *	registration fails NULL is returned.
 *
 * Description:
 *	Registers an I/O module for use by subsequent calls to
 *	nsc_open.
 */
nsc_io_t *
nsc_register_io(name, type, def)
char *name;
int type;
nsc_def_t *def;
{
	nsc_io_t *io, *tp;
	int rc, id, flag;
	nsc_io_t **iop;

	id = (type & NSC_TYPES);
	flag = (type & ~NSC_TYPES);

	if ((!(id & NSC_ID) || (id & ~NSC_IDS)) &&
	    (id != NSC_NULL || _nsc_null_io))
		return (NULL);

	if (!(io = _nsc_alloc_io(id, name, flag)))
		return (NULL);

	rc = _nsc_decode_io(def, io);

	if (!rc && id != NSC_NULL) {
		_nsc_free_io(io);
		return (NULL);
	}

	mutex_enter(&_nsc_io_lock);

	for (tp = _nsc_io_top; tp; tp = tp->next) {
		if (strcmp(tp->name, name) == 0 || tp->id == id) {
			mutex_exit(&_nsc_io_lock);
			_nsc_free_io(io);
			return (NULL);
		}
	}

	for (iop = &_nsc_io_top; *iop; iop = &(*iop)->next)
		if (id >= (*iop)->id)
			break;

	io->next = (*iop);
	(*iop) = io;

	_nsc_io_lbolt = nsc_lbolt();

	while ((rc = _nsc_reopen_io(NULL, 0)) != 0)
		if (rc != ERESTART)
			break;

	mutex_exit(&_nsc_io_lock);
	return (io);
}

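/*
 * Illustrative sketch (not part of this driver): how an I/O provider
 * might register itself with nsc_register_io() and remove itself again
 * with nsc_unregister_io().  The module id (NSC_EXAMPLE_ID), the entry
 * point functions and the nsc_def_t layout of the provider table are
 * assumptions for illustration only; a real provider uses the ids and
 * flags from the nsctl headers and supplies entry points matching the
 * names decoded against _nsc_io_def above.
 *
 *	static nsc_def_t _example_def[] = {
 *		{ "Open",	(uintptr_t)example_open,	0 },
 *		{ "Close",	(uintptr_t)example_close,	0 },
 *		{ NULL,		(uintptr_t)NULL,		0 }
 *	};
 *
 *	static nsc_io_t *example_io;
 *
 *	static int
 *	example_register(void)
 *	{
 *		example_io = nsc_register_io("example", NSC_EXAMPLE_ID,
 *		    _example_def);
 *		return (example_io ? 0 : EINVAL);
 *	}
 *
 *	static int
 *	example_unregister(void)
 *	{
 *		return (nsc_unregister_io(example_io, NSC_PCATCH));
 *	}
 */
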
/*
 * static int
 * _nsc_decode_io (nsc_def_t *def, nsc_io_t *io)
 *	Decode I/O module definition.
 *
 * Calling/Exit State:
 *	Returns TRUE if the definition contains an adequate
 *	description of an I/O module.
 *
 * Description:
 *	Decode the definition of an I/O module and supply
 *	translation routines where possible for operations
 *	that are not defined.
 */
static int
_nsc_decode_io(def, io)
nsc_def_t *def;
nsc_io_t *io;
{
	nsc_decode_param(def, _nsc_io_def, (long *)io);
	nsc_decode_param(def, _nsc_disk_def, (long *)io);
	nsc_decode_param(def, _nsc_cache_def, (long *)io);

	_nsc_add_disk(io);
	_nsc_add_cache(io);

	return (1);
}


/*
 * int
 * nsc_unregister_io (nsc_io_t *io, int flag)
 *	Un-register an I/O module.
 *
 * Calling/Exit State:
 *	Returns 0 on success, otherwise returns an error code.
 *
 * Description:
 *	The specified I/O module is un-registered if possible.
 *	All open file descriptors using the module will be closed
 *	in preparation for a subsequent re-open.
 *
 *	If NSC_PCATCH is specified and a signal is received,
 *	the unregister will be terminated and EINTR returned.
 */
int
nsc_unregister_io(nsc_io_t *io, int flag)
{
	nsc_path_t *sp;
	nsc_io_t *xio;
	int rc = 0;

	if (io == _nsc_null_io)
		return (EINVAL);

	mutex_enter(&_nsc_io_lock);

	for (xio = _nsc_io_top; xio; xio = xio->next)
		if (xio == io)
			break;

	if (!xio || io->pend) {
		mutex_exit(&_nsc_io_lock);
		return (xio ? EALREADY : 0);
	}

	io->pend = 1;
lp:
	for (sp = _nsc_path_top; sp; sp = sp->sp_next)
		if (sp->sp_io == io) {
			mutex_exit(&_nsc_io_lock);

			if ((rc = nsc_unregister_path(sp, flag)) != 0) {
				io->pend = 0;
				return (rc);
			}

			mutex_enter(&_nsc_io_lock);
			goto lp;
		}

	_nsc_io_lbolt = nsc_lbolt();

	while (io->refcnt && !rc) {
		while ((rc = _nsc_reopen_io(NULL, flag)) != 0)
			if (rc != ERESTART)
				break;

		if (rc || !io->refcnt)
			break;

		if (!cv_wait_sig(&io->cv, &_nsc_io_lock))
			rc = EINTR;
	}

	/*
	 * We have tried to get rid of all the IO provider's clients.
	 * If there are still anonymous buffers outstanding, then fail
	 * the unregister.
	 */

	if (!rc && io->abufcnt > 0)
		rc = EUSERS;

	if (rc)
		io->pend = 0;

	mutex_exit(&_nsc_io_lock);

	if (!rc)
		_nsc_free_io(io);

	return (rc);
}


/*
 * nsc_path_t *
 * nsc_register_path (char *path, int type, nsc_io_t *io)
 *	Register interest in pathname.
 *
 * Calling/Exit State:
 *	Returns a token for use in future calls to
 *	nsc_unregister_path.  The 'path' argument can contain
 *	wild characters.  If registration fails NULL is returned.
 *	May not be called for io providers that support NSC_ANON.
 *
 * Description:
 *	Registers an interest in any pathnames matching 'path'
 *	which are opened with the specified type.
 */
nsc_path_t *
nsc_register_path(char *path, int type, nsc_io_t *io)
{
	nsc_path_t *sp, **spp;
	int rc;

	if ((type & NSC_IDS) || !io || (io->provide & NSC_ANON) ||
	    !(sp = nsc_kmem_zalloc(sizeof (*sp), KM_SLEEP, _nsc_local_mem)))
		return (NULL);

	sp->sp_path = nsc_strdup(path);
	sp->sp_type = type;
	sp->sp_io = io;

	mutex_enter(&_nsc_io_lock);

	for (spp = &_nsc_path_top; *spp; spp = &(*spp)->sp_next)
		if (io->id >= (*spp)->sp_io->id)
			break;

	sp->sp_next = (*spp);
	(*spp) = sp;

	_nsc_io_lbolt = nsc_lbolt();

	while ((rc = _nsc_reopen_io(path, 0)) != 0)
		if (rc != ERESTART)
			break;

	mutex_exit(&_nsc_io_lock);
	return (sp);
}


/*
 * int
 * nsc_unregister_path (nsc_path_t *sp, int flag)
 *	Un-register interest in pathname.
 *
 * Calling/Exit State:
 *	Returns 0 on success, otherwise returns an error code.
 *
 * Description:
 *	Interest in the specified pathname is un-registered
 *	if possible.  All appropriate file descriptors will be
 *	closed in preparation for a subsequent re-open.
 *
 *	If NSC_PCATCH is specified and a signal is received,
 *	the unregister will be terminated and EINTR returned.
 */
int
nsc_unregister_path(sp, flag)
nsc_path_t *sp;
int flag;
{
	nsc_path_t *xsp, **spp;
	int rc;

	mutex_enter(&_nsc_io_lock);

	for (xsp = _nsc_path_top; xsp; xsp = xsp->sp_next)
		if (xsp == sp)
			break;

	if (!xsp || sp->sp_pend) {
		mutex_exit(&_nsc_io_lock);
		return (xsp ? EALREADY : 0);
	}

	sp->sp_pend = 1;
	_nsc_io_lbolt = nsc_lbolt();

	while ((rc = _nsc_reopen_io(sp->sp_path, flag)) != 0)
		if (rc != ERESTART) {
			sp->sp_pend = 0;
			mutex_exit(&_nsc_io_lock);
			return (rc);
		}

	for (spp = &_nsc_path_top; *spp; spp = &(*spp)->sp_next)
		if (*spp == sp)
			break;

	if (*spp)
		(*spp) = sp->sp_next;

	mutex_exit(&_nsc_io_lock);

	nsc_strfree(sp->sp_path);
	nsc_kmem_free(sp, sizeof (*sp));
	return (0);
}

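/*
 * Illustrative sketch (not part of this driver): registering interest
 * in a set of pathnames on behalf of an I/O provider.  The provider
 * token (example_io), the path pattern and the use of NSC_DEVICE are
 * assumptions for illustration; note from the checks in
 * nsc_register_path() above that the type must not include a module
 * id and that the provider must not advertise NSC_ANON.
 *
 *	static nsc_path_t *example_token;
 *
 *	example_token = nsc_register_path("/dev/rdsk/*", NSC_DEVICE,
 *	    example_io);
 *	if (example_token == NULL)
 *		return (EINVAL);
 *
 *	(at unregister/unload time)
 *	(void) nsc_unregister_path(example_token, NSC_PCATCH);
 */
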
/*
 * static int
 * _nsc_reopen_io (char *path, int flag)
 *	Force re-open of all file descriptors.
 *
 * Calling/Exit State:
 *	The _nsc_io_lock must be held across calls to
 *	this function.
 *
 *	Returns 0 if the force succeeds without releasing
 *	_nsc_io_lock, otherwise returns an error code.
 *
 * Description:
 *	A re-open is forced for all file descriptors as
 *	appropriate.  For performance reasons available
 *	devices are re-opened before those that would block.
 */
static int
_nsc_reopen_io(path, flag)
char *path;
int flag;
{
	nsc_dev_t *dp, *dev;
	int rc, errno = 0;
	int try, run;

	for (run = 1, try = (NSC_TRY | NSC_DEFER); run--; try = 0) {
		for (dev = _nsc_dev_top; dev; dev = dev->nsc_next) {
			if (path && !nsc_strmatch(dev->nsc_path, path))
				continue;

			if (!(rc = _nsc_reopen_dev(dev, flag | try)))
				continue;

			for (dp = _nsc_dev_top; dp; dp = dp->nsc_next)
				if (dp == dev)
					break;

			if (!dp)
				return (ERESTART);

			if (try && !(flag & NSC_TRY))
				run = 1;
			if (!run && errno != ERESTART)
				errno = rc;
		}
	}

	return (errno);
}


/*
 * static int
 * _nsc_reopen_dev (nsc_dev_t *dev, int flag)
 *	Force re-open of entire device.
 *
 * Calling/Exit State:
 *	The _nsc_io_lock must be held across calls to
 *	this function.
 *
 *	Returns 0 if the force succeeds without releasing
 *	_nsc_io_lock, otherwise returns an error code.
 *
 * Description:
 *	A re-open is forced for all file descriptors for the
 *	device as appropriate.
 */
static int
_nsc_reopen_dev(dev, flag)
nsc_dev_t *dev;
int flag;
{
	int rc, errno = 0;
	nsc_iodev_t *iodev;
	int try, run;
	nsc_fd_t *fd;

	mutex_enter(&dev->nsc_lock);

	for (run = 1, try = (NSC_TRY | NSC_DEFER); run--; try = 0)
		for (iodev = dev->nsc_list; iodev; iodev = iodev->si_next) {
			for (fd = iodev->si_open; fd; fd = fd->sf_next) {
				if (!(rc = _nsc_reopen_fd(fd, flag | try)))
					continue;

				if (rc == -ERESTART)
					return (ERESTART);

				if (!_nsc_relock_dev(dev, fd, iodev))
					return (ERESTART);

				if (try && !(flag & NSC_TRY))
					run = 1;
				if (!run && errno != ERESTART)
					errno = rc;
			}
		}

	for (run = 1, try = (NSC_TRY | NSC_DEFER); run--; try = 0)
		for (fd = dev->nsc_close; fd; fd = fd->sf_next) {
			if (!(rc = _nsc_reopen_fd(fd, flag | try)))
				continue;

			if (rc == -ERESTART)
				return (ERESTART);

			if (!_nsc_relock_dev(dev, fd, NULL))
				return (ERESTART);

			if (try && !(flag & NSC_TRY))
				run = 1;
			if (!run && errno != ERESTART)
				errno = rc;
		}

	mutex_exit(&dev->nsc_lock);
	return (errno);
}


/*
 * static int
 * _nsc_relock_dev (nsc_dev_t *dev, nsc_fd_t *fd, nsc_iodev_t *iodev)
 *	Relock device structure if possible.
 *
 * Calling/Exit State:
 *	The _nsc_io_lock must be held across calls to
 *	this function.
 *
 *	Checks whether the file descriptor is still part
 *	of the specified device and I/O device.  If so the
 *	device lock is taken.  Otherwise FALSE is returned.
 */
static int
_nsc_relock_dev(nsc_dev_t *dev, nsc_fd_t *fd, nsc_iodev_t *iodev)
{
	nsc_fd_t *fp = NULL;
	nsc_iodev_t *iop;
	nsc_dev_t *dp;

	for (dp = _nsc_dev_top; dp; dp = dp->nsc_next)
		if (dp == dev)
			break;

	if (!dp)
		return (0);

	mutex_enter(&dev->nsc_lock);

	if (iodev)
		for (iop = dev->nsc_list; iop; iop = iop->si_next)
			if (iop == iodev)
				break;

	if (!iodev || iop) {
		fp = (iodev) ? iodev->si_open : dev->nsc_close;

		for (; fp; fp = fp->sf_next)
			if (fp == fd)
				break;
	}

	if (!fp) {
		mutex_exit(&dev->nsc_lock);
		return (0);
	}

	return (1);
}


/*
 * static int
 * _nsc_reopen_fd (nsc_fd_t *fd, int flag)
 *	Force re-open of file descriptor.
 *
 * Calling/Exit State:
 *	Both _nsc_io_lock and the device lock must be held
 *	across calls to this function.
 *
 *	Returns 0 if the force succeeds without releasing
 *	any locks, otherwise returns an error code.  If an
 *	error code is returned the device lock is released.
 *
 * Description:
 *	If appropriate the file descriptor is closed in order
 *	to force a subsequent open using the currently available
 *	resources.
 */
static int
_nsc_reopen_fd(fd, flag)
nsc_fd_t *fd;
int flag;
{
	nsc_dev_t *dev = fd->sf_dev;
	nsc_iodev_t *iodev = fd->sf_iodev;
	int changed = 0;
	int rc;

	if (!fd->sf_pend && !iodev)
		return (0);

	if (fd->sf_pend == _NSC_OPEN)
		if (fd->sf_lbolt - _nsc_io_lbolt > 0)
			return (0);

	if (iodev &&
	    (iodev->si_io ==
	    _nsc_find_io(dev->nsc_path, fd->sf_type, &changed)) &&
	    !changed)
		return (0);

	if (iodev)
		fd->sf_reopen = 1;

	mutex_exit(&_nsc_io_lock);

	dev->nsc_reopen = 1;

	rc = _nsc_close_fd(fd, flag);

	dev->nsc_reopen = 0;

	if (rc == EAGAIN && (flag & NSC_DEFER) && fd->sf_reopen)
		dev->nsc_drop = 1;

	mutex_exit(&dev->nsc_lock);

	if (rc == -ERESTART)
		delay(2);	/* allow other threads cpu time */

	mutex_enter(&_nsc_io_lock);
	return (rc ? rc : ERESTART);
}


/*
 * nsc_fd_t *
 * nsc_open (char *path, int type, nsc_def_t *def, blind_t arg, int *sts)
 *	Open file descriptor for pathname.
 *
 * Calling/Exit State:
 *	Returns file descriptor if open succeeds, otherwise
 *	returns 0 and puts error code in the location pointed
 *	to by sts.
 *
 * Description:
 *	Open the specified pathname using an appropriate access
 *	method.
 */
nsc_fd_t *
nsc_open(path, type, def, arg, sts)
char *path;
int type;
nsc_def_t *def;
blind_t arg;
int *sts;
{
	int flag, rc;
	nsc_fd_t *fd;

	flag = (type & ~NSC_TYPES);
	type &= NSC_TYPES;

	if ((flag & NSC_READ) == 0)
		flag |= NSC_RDWR;

	if ((rc = _nsc_alloc_fd(path, type, flag, &fd)) != 0) {
		if (sts)
			*sts = rc;
		return (NULL);
	}

	fd->sf_arg = arg;
	fd->sf_aio = _nsc_null_io;

	nsc_decode_param(def, _nsc_fd_def, (long *)fd);

	mutex_enter(&fd->sf_dev->nsc_lock);

	while ((rc = _nsc_open_fd(fd, flag)) != 0)
		if (rc != ERESTART)
			break;

	mutex_exit(&fd->sf_dev->nsc_lock);

	if (rc) {
		_nsc_free_fd(fd);
		if (sts)
			*sts = rc;
		return (NULL);
	}

	return (fd);
}

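/*
 * Illustrative sketch (not part of this driver): a minimal open/close
 * sequence as seen by an nsctl client.  The path, and the open type
 * and flags (NSC_DEVICE | NSC_RDWR), are examples only; callbacks such
 * as "Pinned" and "Unpinned" may be supplied through the nsc_def_t
 * argument and are decoded against _nsc_fd_def above.
 *
 *	nsc_fd_t *fd;
 *	int sts;
 *
 *	fd = nsc_open("/dev/rdsk/c0t0d0s0", NSC_DEVICE | NSC_RDWR,
 *	    NULL, NULL, &sts);
 *	if (fd == NULL)
 *		return (sts);
 *
 *	(use the file descriptor, then)
 *	(void) nsc_close(fd);
 */
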
/*
 * int
 * _nsc_open_fd (nsc_fd_t *fd, int flag)
 *	Open file descriptor.
 *
 * Calling/Exit State:
 *	The device lock must be held across calls to
 *	this function.
 *
 *	Returns 0 if the open succeeds, otherwise
 *	returns an error code.
 *
 * Description:
 *	Open the specified file descriptor.
 */
int
_nsc_open_fd(fd, flag)
nsc_fd_t *fd;
int flag;
{
	nsc_dev_t *dev = fd->sf_dev;
	int rc;

	if (fd->sf_pend)
		return (_nsc_wait_dev(dev, flag));

	if (fd->sf_iodev)
		return (0);
	if (flag & NSC_NOBLOCK)
		return (EAGAIN);

	fd->sf_pend = _NSC_OPEN;
	fd->sf_lbolt = nsc_lbolt();

	mutex_exit(&dev->nsc_lock);

	rc = _nsc_open_fn(fd, flag);

	mutex_enter(&dev->nsc_lock);
	fd->sf_pend = 0;

	if (!rc)
		fd->sf_iodev->si_pend = 0;

	if (dev->nsc_wait || dev->nsc_refcnt <= 0)
		cv_broadcast(&dev->nsc_cv);

	return (rc ? rc : ERESTART);
}


/*
 * static int
 * _nsc_open_fn (nsc_fd_t *fd, int flag)
 *	Allocate I/O device and open file descriptor.
 *
 * Calling/Exit State:
 *	No locks may be held across this function.
 *
 *	If the open succeeds an I/O device will be
 *	attached to the file descriptor, marked as
 *	pending and 0 returned.  Otherwise, returns
 *	an error code.
 *
 * Description:
 *	Allocate an I/O device and open the specified
 *	file descriptor.
 */
static int
_nsc_open_fn(fd, flag)
nsc_fd_t *fd;
int flag;
{
	nsc_dev_t *dev = fd->sf_dev;
	nsc_iodev_t *iodev;
	int rc;

	if ((rc = _nsc_alloc_iodev(dev, fd->sf_type, &iodev)) != 0)
		return (rc);

	mutex_enter(&dev->nsc_lock);

	if (iodev->si_pend) {
		rc = _nsc_wait_dev(dev, flag);
		mutex_exit(&dev->nsc_lock);
		_nsc_free_iodev(iodev);
		return (rc);
	}

	iodev->si_pend = _NSC_OPEN;
	mutex_exit(&dev->nsc_lock);

	rc = (*iodev->si_io->open)(dev->nsc_path,
	    (fd->sf_flag & ~NSC_RDWR), &fd->sf_cd, iodev);

	if (rc) {
		iodev->si_pend = 0;
		_nsc_free_iodev(iodev);
		return (rc);
	}

	/* save away the DevMaj and DevMin values */
	if (iodev->si_io->id == NSC_RAW_ID) {
		rc = _nsc_setval(dev, NULL, NSC_DEVMAJ,
		    (int)getmajor((dev_t)fd->sf_cd), FALSE);
#ifdef DEBUG
		if (rc != 1) {
			cmn_err(CE_NOTE, "!nsctl: could not set DevMaj (%s:%x)",
			    dev->nsc_path, (int)getmajor((dev_t)fd->sf_cd));
		}
#endif

		rc = _nsc_setval(dev, NULL, NSC_DEVMIN,
		    (int)getminor((dev_t)fd->sf_cd), FALSE);
#ifdef DEBUG
		if (rc != 1) {
			cmn_err(CE_NOTE, "!nsctl: could not set DevMin (%s:%x)",
			    dev->nsc_path, (int)getminor((dev_t)fd->sf_cd));
		}
#endif
	}

	fd->sf_iodev = iodev;
	_nsc_relink_fd(fd, &dev->nsc_close, &iodev->si_open, iodev);

	return (0);
}


/*
 * int
 * nsc_close (nsc_fd_t *fd)
 *	Close file descriptor for pathname.
 *
 * Calling/Exit State:
 *	Returns 0 if close succeeds, otherwise returns error
 *	code.
 *
 * Description:
 *	Close the specified file descriptor.  It is assumed
 *	that all other users of this file descriptor have
 *	finished.  Any reserve will be discarded before the
 *	close is performed.
 */
int
nsc_close(fd)
nsc_fd_t *fd;
{
	int rc;

	if (!fd)
		return (0);

	while (fd->sf_reserve)
		nsc_release(fd);

	mutex_enter(&fd->sf_dev->nsc_lock);

	fd->sf_owner = NULL;

	while ((rc = _nsc_close_fd(fd, 0)) != 0)
		if (rc != ERESTART)
			break;

	nsc_decode_param(_nsc_fd_def, _nsc_fd_def, (long *)fd);

	mutex_exit(&fd->sf_dev->nsc_lock);

	if (!rc)
		_nsc_free_fd(fd);
	return (rc);
}


/*
 * int
 * _nsc_close_fd (nsc_fd_t *fd, int flag)
 *	Close file descriptor.
 *
 * Calling/Exit State:
 *	The device lock must be held across calls to
 *	this function.
 *
 *	Returns 0 if the close succeeds, otherwise
 *	returns an error code.
 *
 * Description:
 *	Close the specified file descriptor.
 */
int
_nsc_close_fd(fd, flag)
nsc_fd_t *fd;
int flag;
{
	nsc_dev_t *dev = fd->sf_dev;
	nsc_iodev_t *iodev;
	int rc;

	if (fd->sf_pend) {
		if (fd->sf_pend == _NSC_CLOSE && dev->nsc_reopen != 0)
			return (-ERESTART);

		return (_nsc_wait_dev(dev, flag));
	}

	flag |= NSC_RDWR;
	iodev = fd->sf_iodev;

	if (!iodev)
		return (0);

	if ((rc = _nsc_detach_fd(fd, flag)) != 0)
		return (rc);

	if (iodev->si_pend)
		return (_nsc_wait_dev(dev, flag));

	if (iodev->si_open == fd && !fd->sf_next) {
		if ((rc = _nsc_detach_iodev(iodev, NULL, flag)) != 0)
			return (rc);

		if (dev->nsc_list == iodev && !iodev->si_next)
			if ((rc = _nsc_detach_dev(dev, NULL, flag)) != 0)
				return (rc);
	}

	if (flag & NSC_NOBLOCK)
		return (EAGAIN);

	fd->sf_pend = _NSC_CLOSE;
	iodev->si_pend = _NSC_CLOSE;
	mutex_exit(&dev->nsc_lock);

	rc = _nsc_close_fn(fd);

	mutex_enter(&dev->nsc_lock);
	fd->sf_pend = 0;

	fd->sf_reopen = 0;
	if (rc)
		iodev->si_pend = 0;

	if (dev->nsc_wait || dev->nsc_refcnt <= 0)
		cv_broadcast(&dev->nsc_cv);

	return (rc ? rc : ERESTART);
}


/*
 * static int
 * _nsc_close_fn (nsc_fd_t *fd)
 *	Close file descriptor and free I/O device.
 *
 * Calling/Exit State:
 *	No locks may be held across this function.
 *
 *	If the close succeeds the I/O device will be
 *	detached from the file descriptor, released
 *	and 0 returned.  Otherwise, returns an error
 *	code.
 *
 * Description:
 *	Close the specified file descriptor and free
 *	the I/O device.
 */
static int
_nsc_close_fn(fd)
nsc_fd_t *fd;
{
	nsc_iodev_t *iodev = fd->sf_iodev;
	nsc_dev_t *dev = fd->sf_dev;
	int last, rc;

	last = (iodev->si_open == fd && !fd->sf_next);

	if (last || (iodev->si_io->flag & NSC_REFCNT))
		if ((rc = (*iodev->si_io->close)(fd->sf_cd)) != 0)
			return (rc);

	fd->sf_iodev = NULL;
	_nsc_relink_fd(fd, &iodev->si_open, &dev->nsc_close, iodev);

	iodev->si_pend = 0;
	_nsc_free_iodev(iodev);

	return (0);
}


/*
 * void
 * nsc_set_owner (nsc_fd_t *fd, nsc_iodev_t *iodev)
 *	Set owner associated with file descriptor.
 *
 * Calling/Exit State:
 *	Sets the owner field in the file descriptor.
 */
void
nsc_set_owner(nsc_fd_t *fd, nsc_iodev_t *iodev)
{
	if (fd) {
		mutex_enter(&fd->sf_dev->nsc_lock);
		fd->sf_owner = iodev;
		mutex_exit(&fd->sf_dev->nsc_lock);
	}
}


/*
 * char *
 * nsc_pathname (nsc_fd_t *fd)
 *	Pathname associated with file descriptor.
 *
 * Calling/Exit State:
 *	Returns a pointer to the pathname associated
 *	with the given file descriptor.
 */
char *
nsc_pathname(fd)
nsc_fd_t *fd;
{
	return ((fd) ? (fd->sf_dev->nsc_path) : 0);
}


/*
 * int
 * nsc_fdpathcmp(nsc_fd_t *fd, uint64_t phash, char *path)
 *	Compare fd to pathname and hash.
 *
 * Calling/Exit State:
 *	Returns comparison value like strcmp(3C).
 *
 * Description:
 *	Does an optimised comparison of the pathname and associated hash
 *	value (as returned from nsc_strhash()) against the pathname of
 *	the file descriptor, fd.
 */
int
nsc_fdpathcmp(nsc_fd_t *fd, uint64_t phash, char *path)
{
	int rc = -1;

	if (fd != NULL && fd->sf_dev->nsc_phash == phash)
		rc = strcmp(fd->sf_dev->nsc_path, path);

	return (rc);
}

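/*
 * static int
 * _nsc_setval (nsc_dev_t *dev, char *path, char *name, int val, int do_ncall)
 *	Set a per-device {name, value} pair.
 *
 * Calling/Exit State:
 *	Returns 1 if the value has been set, otherwise 0.
 *	Either dev or path must be non-NULL.
 *
 * Description:
 *	Stores the value under the device/values header for the device,
 *	allocating the header and the value node as required.  If
 *	do_ncall is true the new value is also sent synchronously to
 *	the mirror node via ncall.
 */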
static int
_nsc_setval(nsc_dev_t *dev, char *path, char *name, int val, int do_ncall)
{
	nsc_devval_t *dv;
	nsc_rval_t *rval;
	ncall_t *ncall;
	nsc_val_t *vp;
	uint64_t phash;
	char *pp;
	int rc;

	ASSERT(dev != NULL || path != NULL);
#ifdef DEBUG
	if (dev != NULL && path != NULL) {
		ASSERT(strcmp(dev->nsc_path, path) == 0);
	}
#endif

	pp = (dev != NULL) ? dev->nsc_path : path;

	if (strlen(name) >= NSC_SETVAL_MAX) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!nsc_setval: max name size(%d) exceeded(%d)",
		    NSC_SETVAL_MAX-1, (int)strlen(name));
#endif
		return (0);
	}

	phash = nsc_strhash(pp);

	mutex_enter(&_nsc_devval_lock);

	if (dev != NULL)
		dv = dev->nsc_values;
	else {
		for (dv = _nsc_devval_top; dv != NULL; dv = dv->dv_next) {
			if (phash == dv->dv_phash &&
			    strcmp(pp, dv->dv_path) == 0)
				/* found dv for device */
				break;
		}
	}

	if (dv == NULL) {
		dv = nsc_kmem_zalloc(sizeof (*dv), KM_SLEEP, _nsc_local_mem);
		if (dv == NULL) {
			mutex_exit(&_nsc_devval_lock);
			return (0);
		}

		(void) strncpy(dv->dv_path, pp, sizeof (dv->dv_path));
		dv->dv_phash = phash;

		dv->dv_next = _nsc_devval_top;
		_nsc_devval_top = dv;
		if (dev != NULL)
			dev->nsc_values = dv;
	}

	for (vp = dv->dv_values; vp; vp = vp->sv_next) {
		if (strcmp(vp->sv_name, name) == 0) {
			vp->sv_value = val;
			break;
		}
	}

	if (vp == NULL) {
		vp = nsc_kmem_zalloc(sizeof (*vp), KM_SLEEP, _nsc_local_mem);
		if (vp != NULL) {
			(void) strncpy(vp->sv_name, name, sizeof (vp->sv_name));
			vp->sv_value = val;
			vp->sv_next = dv->dv_values;
			dv->dv_values = vp;
		}
	}

	mutex_exit(&_nsc_devval_lock);

	/*
	 * phoenix: ncall the new value to the other node now.
	 */

	if (vp && do_ncall) {
		/* CONSTCOND */
		ASSERT(sizeof (nsc_rval_t) <= NCALL_DATA_SZ);

		rval = nsc_kmem_zalloc(sizeof (*rval), KM_SLEEP,
		    _nsc_local_mem);
		if (rval == NULL) {
			goto out;
		}

		rc = ncall_alloc(ncall_mirror(ncall_self()), 0, 0, &ncall);
		if (rc == 0) {
			(void) strncpy(rval->path, pp, sizeof (rval->path));
			(void) strncpy(rval->name, name, sizeof (rval->name));
			rval->value = val;

			rc = ncall_put_data(ncall, rval, sizeof (*rval));
			if (rc == 0) {
				/*
				 * Send synchronously and read a reply
				 * so that we know that the remote
				 * setval has completed before this
				 * function returns and hence whilst
				 * the device is still reserved on this
				 * node.
				 */
				if (ncall_send(ncall, 0, NSC_SETVAL) == 0)
					(void) ncall_read_reply(ncall, 1, &rc);
			}

			ncall_free(ncall);
		}

		nsc_kmem_free(rval, sizeof (*rval));
	}

out:
	return (vp ? 1 : 0);
}

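/*
 * static void
 * r_nsc_setval (ncall_t *ncall, int *ap)
 *	Remote service routine for NSC_SETVAL.
 *
 * Description:
 *	Unpacks an nsc_rval_t sent by _nsc_setval() on the other node,
 *	applies it locally via _nsc_setval(), and replies with 0 on
 *	success or an error code on failure.
 */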
/* ARGSUSED */

static void
r_nsc_setval(ncall_t *ncall, int *ap)
{
	nsc_rval_t *rval;
	int rc;

	rval = nsc_kmem_zalloc(sizeof (*rval), KM_SLEEP, _nsc_local_mem);
	if (rval == NULL) {
		ncall_reply(ncall, ENOMEM);
		return;
	}

	rc = ncall_get_data(ncall, rval, sizeof (*rval));
	if (rc != 0) {
		ncall_reply(ncall, EFAULT);
		return;
	}

	if (_nsc_setval(NULL, rval->path, rval->name, rval->value, FALSE))
		rc = 0;
	else
		rc = ENOMEM;

	ncall_reply(ncall, rc);
	nsc_kmem_free(rval, sizeof (*rval));
}


/* ARGSUSED */

static void
r_nsc_setval_all(ncall_t *ncall, int *ap)
{
	nsc_rval_t *in = NULL, *out = NULL;
	nsc_devval_t *dv;
	nsc_val_t *vp;
	ncall_t *np;
	uint64_t phash;
	int rc;

	/* CONSTCOND */
	ASSERT(sizeof (nsc_rval_t) <= NCALL_DATA_SZ);

	in = nsc_kmem_zalloc(sizeof (*in), KM_SLEEP, _nsc_local_mem);
	out = nsc_kmem_zalloc(sizeof (*out), KM_SLEEP, _nsc_local_mem);
	if (in == NULL || out == NULL) {
		if (in != NULL) {
			nsc_kmem_free(in, sizeof (*in));
			in = NULL;
		}
		if (out != NULL) {
			nsc_kmem_free(out, sizeof (*out));
			out = NULL;
		}
		ncall_reply(ncall, ENOMEM);
		return;
	}

	rc = ncall_get_data(ncall, in, sizeof (*in));
	if (rc != 0) {
		ncall_reply(ncall, EFAULT);
		return;
	}

	phash = nsc_strhash(in->path);

	(void) strncpy(out->path, in->path, sizeof (out->path));

	rc = ncall_alloc(ncall_mirror(ncall_self()), 0, 0, &np);
	if (rc != 0) {
		ncall_reply(ncall, ENOMEM);
		return;
	}

	mutex_enter(&_nsc_devval_lock);

	for (dv = _nsc_devval_top; dv; dv = dv->dv_next) {
		if (dv->dv_phash == phash &&
		    strcmp(dv->dv_path, in->path) == 0)
			break;
	}

	if (dv) {
		for (vp = dv->dv_values; vp; vp = vp->sv_next) {
			if (strcmp(vp->sv_name, NSC_DEVMIN) == 0 ||
			    strcmp(vp->sv_name, NSC_DEVMAJ) == 0) {
				/* ignore the implicit DevMin/DevMaj values */
				continue;
			}

			(void) strncpy(out->name, vp->sv_name,
			    sizeof (out->name));
			out->value = vp->sv_value;

			rc = ncall_put_data(np, out, sizeof (*out));
			if (rc == 0) {
				/*
				 * Send synchronously and read a reply
				 * so that we know that the remote
				 * setval has completed before this
				 * function returns.
				 */
				if (ncall_send(np, 0, NSC_SETVAL) == 0)
					(void) ncall_read_reply(np, 1, &rc);
			}

			ncall_reset(np);
		}

		ncall_free(np);
		rc = 0;
	} else {
		rc = ENODEV;
	}

	mutex_exit(&_nsc_devval_lock);

	ncall_reply(ncall, rc);

	nsc_kmem_free(out, sizeof (*out));
	nsc_kmem_free(in, sizeof (*in));
}


/*
 * int
 * nsc_setval (nsc_fd_t *fd, char *name, int val)
 *	Set value for device.
 *
 * Calling/Exit State:
 *	Returns 1 if the value has been set, otherwise 0.
 *	Must be called with the fd reserved.
 *
 * Description:
 *	Sets the specified global variable for the device
 *	to the value provided.
 */
int
nsc_setval(nsc_fd_t *fd, char *name, int val)
{
	if (!fd)
		return (0);

	if (!nsc_held(fd))
		return (0);

	return (_nsc_setval(fd->sf_dev, NULL, name, val, TRUE));
}


/*
 * int
 * nsc_getval (nsc_fd_t *fd, char *name, int *vp)
 *	Get value from device.
 *
 * Calling/Exit State:
 *	Returns 1 if the value has been found, otherwise 0.
 *	Must be called with the fd reserved, except for "DevMaj" / "DevMin".
 *
 * Description:
 *	Finds the value of the specified device variable for
 *	the device and returns it in the location pointed to
 *	by vp.
 */
int
nsc_getval(nsc_fd_t *fd, char *name, int *vp)
{
	nsc_devval_t *dv;
	nsc_val_t *val;

	if (!fd)
		return (0);

	/*
	 * Don't check for nsc_held() for the device number values
	 * since these are magically created and cannot change when
	 * the fd is not reserved.
	 */

	if (strcmp(name, NSC_DEVMAJ) != 0 &&
	    strcmp(name, NSC_DEVMIN) != 0 &&
	    !nsc_held(fd))
		return (0);

	mutex_enter(&_nsc_devval_lock);

	dv = fd->sf_dev->nsc_values;
	val = NULL;

	if (dv != NULL) {
		for (val = dv->dv_values; val; val = val->sv_next) {
			if (strcmp(val->sv_name, name) == 0) {
				*vp = val->sv_value;
				break;
			}
		}
	}

	mutex_exit(&_nsc_devval_lock);

	return (val ? 1 : 0);
}

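/*
 * Illustrative sketch (not part of this driver): storing and reading a
 * per-device value through nsc_setval()/nsc_getval().  The value name
 * "ExampleFlag" is hypothetical; the fd must be reserved except when
 * reading the implicit "DevMaj"/"DevMin" values.
 *
 *	int maj, flag = 1;
 *
 *	if (nsc_setval(fd, "ExampleFlag", flag) == 0)
 *		cmn_err(CE_NOTE, "!example: setval failed");
 *
 *	if (nsc_getval(fd, NSC_DEVMAJ, &maj))
 *		cmn_err(CE_NOTE, "!example: major %d", maj);
 */
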
/*
 * int
 * nsc_shared (nsc_fd_t *fd)
 *	Device is currently shared.
 *
 * Calling/Exit State:
 *	The device lock must be held across calls to
 *	this function.
 *
 *	Returns an indication of whether the device accessed
 *	by the file descriptor is currently referenced by more
 *	than one user.
 *
 *	This is only intended for use in performance critical
 *	situations.
 */
int
nsc_shared(fd)
nsc_fd_t *fd;
{
	nsc_iodev_t *iodev;
	int cnt = 0;

	if (!fd)
		return (0);
	if (!fd->sf_iodev)
		return (1);

	for (iodev = fd->sf_dev->nsc_list; iodev; iodev = iodev->si_next)
		for (fd = iodev->si_open; fd; fd = fd->sf_next)
			if (!fd->sf_owner && cnt++)
				return (1);

	return (0);
}


/*
 * kmutex_t *
 * nsc_lock_addr (nsc_fd_t *fd)
 *	Address of device lock.
 *
 * Calling/Exit State:
 *	Returns a pointer to the spin lock associated with the
 *	device.
 *
 * Description:
 *	This is only intended for use in performance critical
 *	situations in conjunction with nsc_reserve_lk.
 */
kmutex_t *
nsc_lock_addr(fd)
nsc_fd_t *fd;
{
	return (&fd->sf_dev->nsc_lock);
}


/*
 * int
 * _nsc_call_io (long f, blind_t a, blind_t b, blind_t c)
 *	Call information function.
 *
 * Calling/Exit State:
 *	Returns result from function or 0 if not available.
 *	f represents the offset into the I/O structure at which
 *	the required function can be found and a, b, c are the
 *	desired arguments.
 *
 * Description:
 *	Calls the requested function for the first available
 *	cache interface.
 */
int
_nsc_call_io(long f, blind_t a, blind_t b, blind_t c)
{
	nsc_io_t *io;
	int (*fn)();
	int rc;

	io = _nsc_reserve_io(NULL, NSC_SDBC_ID);
	if (!io)
		io = _nsc_reserve_io(NULL, NSC_NULL);

	fn = (blindfn_t)(((long *)io)[f]);
	rc = (*fn)(a, b, c);

	_nsc_release_io(io);
	return (rc);
}


/*
 * nsc_io_t *
 * _nsc_reserve_io (char *, int type)
 *	Reserve I/O module.
 *
 * Calling/Exit State:
 *	Returns address of I/O structure matching specified
 *	type, or NULL.
 *
 * Description:
 *	Searches for an appropriate I/O module and increments
 *	the reference count to prevent it being unregistered.
 */
nsc_io_t *
_nsc_reserve_io(path, type)
char *path;
int type;
{
	nsc_io_t *io;

	mutex_enter(&_nsc_io_lock);

	if ((io = _nsc_find_io(path, type, NULL)) != 0)
		io->refcnt++;

	mutex_exit(&_nsc_io_lock);
	return (io);
}


/*
 * static nsc_io_t *
 * _nsc_find_io (char *path, int type, int *changed)
 *	Find I/O module.
 *
 * Calling/Exit State:
 *	The _nsc_io_lock must be held across calls to
 *	this function.
 *
 *	Returns address of I/O structure matching specified
 *	type, or NULL.
 *
 *	'changed' will be set to non-zero if there is a pending
 *	nsc_path_t that matches the criteria for the requested type.
 *	This allows nsctl to distinguish between multiple
 *	nsc_register_path's done by the same I/O provider.
 *
 * Description:
 *	Searches for an appropriate I/O module.
 *
 *	1.  If <type> is a single module id find the specified I/O
 *	    module by module id.
 *
 *	2.  Find the highest module that provides any of the I/O types
 *	    included in <type>, taking into account any modules
 *	    registered via the nsc_register_path() interface if <path>
 *	    is non-NULL.
 *
 *	3.  Find an I/O module following the rules in (2), but whose
 *	    module id is less than the id OR'd into <type>.
 *
 *	If no module is found by the above algorithms and NSC_NULL was
 *	included in <type>, return the _nsc_null_io module.  Otherwise
 *	return NULL.
 */
static nsc_io_t *
_nsc_find_io(char *path, int type, int *changed)
{
	nsc_path_t *sp = NULL;
	nsc_path_t *pp = NULL;
	nsc_io_t *io;

	type &= NSC_TYPES;

	if (path) {
		for (sp = _nsc_path_top; sp; sp = sp->sp_next) {
			if ((type & NSC_ID) &&
			    sp->sp_io->id >= (type & NSC_IDS))
				continue;

			if (sp->sp_pend || (type & sp->sp_type) == 0)
				continue;

			if (nsc_strmatch(path, sp->sp_path))
				break;
		}

		if (sp) {
			/* look for matching pending paths */
			for (pp = _nsc_path_top; pp; pp = pp->sp_next) {
				if (pp->sp_pend &&
				    (type & pp->sp_type) &&
				    nsc_strmatch(path, pp->sp_path)) {
					break;
				}
			}
		}
	}

	for (io = _nsc_io_top; io; io = io->next) {
		if (io->pend)
			continue;

		if (type & NSC_ID) {
			if ((type & ~NSC_IDS) == 0) {
				if (io->id == type)
					break;
				continue;
			}

			if (io->id >= (type & NSC_IDS))
				continue;
		}

		if (io->provide & type)
			break;
	}

	if (pp && (!io || pp->sp_io->id >= io->id)) {
		/*
		 * Mark this as a path change.
		 */
		if (changed) {
			*changed = 1;
		}
	}

	if (sp && (!io || sp->sp_io->id >= io->id))
		io = sp->sp_io;

	if (!io && !(type & NSC_NULL))
		return (NULL);

	if (!io)
		io = _nsc_null_io;

	return (io);
}


/*
 * void
 * _nsc_release_io (nsc_io_t *)
 *	Release I/O module.
 *
 * Description:
 *	Releases reference to I/O structure and wakes up
 *	anybody waiting on it.
 */
void
_nsc_release_io(io)
nsc_io_t *io;
{
	mutex_enter(&_nsc_io_lock);

	io->refcnt--;
	cv_broadcast(&io->cv);

	mutex_exit(&_nsc_io_lock);
}


/*
 * static int
 * _nsc_alloc_fd (char *path, int type, int flag, nsc_fd_t **fdp)
 *	Allocate file descriptor structure.
 *
 * Calling/Exit State:
 *	Stores address of file descriptor through fdp and
 *	returns 0 on success, otherwise returns error code.
 *
 * Description:
 *	A new file descriptor is allocated and linked in to
 *	the file descriptor chain which is protected by the
 *	device lock.
 *
 *	On return the file descriptor must contain all the
 *	information necessary to perform an open.  Details
 *	specific to user callbacks are not required yet.
 */
static int
_nsc_alloc_fd(path, type, flag, fdp)
char *path;
int type, flag;
nsc_fd_t **fdp;
{
	nsc_dev_t *dev;
	nsc_fd_t *fd;
	int rc;

	if (!(fd = (nsc_fd_t *)nsc_kmem_zalloc(
	    sizeof (*fd), KM_SLEEP, _nsc_local_mem)))
		return (ENOMEM);

	if ((rc = _nsc_alloc_dev(path, &dev)) != 0) {
		nsc_kmem_free(fd, sizeof (*fd));
		return (rc);
	}

	mutex_enter(&dev->nsc_lock);

	fd->sf_type = type;
	fd->sf_flag = flag;
	fd->sf_dev = dev;
	fd->sf_next = dev->nsc_close;
	dev->nsc_close = fd;

	mutex_exit(&dev->nsc_lock);

	*fdp = fd;
	return (0);
}


/*
 * static void
 * _nsc_free_fd (nsc_fd_t *)
 *	Free file descriptor.
 *
 * Description:
 *	The file descriptor is removed from the chain and free'd
 *	once pending activity has completed.
 */
static void
_nsc_free_fd(fd)
nsc_fd_t *fd;
{
	nsc_dev_t *dev;
	nsc_fd_t **fdp;

	if (!fd)
		return;

	dev = fd->sf_dev;

	mutex_enter(&dev->nsc_lock);

	for (fdp = &dev->nsc_close; *fdp; fdp = &(*fdp)->sf_next)
		if (*fdp == fd) {
			*fdp = fd->sf_next;
			break;
		}

	if (dev->nsc_wait || dev->nsc_refcnt <= 0)
		cv_broadcast(&dev->nsc_cv);

	while (fd->sf_pend)
		(void) _nsc_wait_dev(dev, 0);

	mutex_exit(&dev->nsc_lock);

	_nsc_free_dev(dev);

	nsc_kmem_free(fd, sizeof (*fd));
}


/*
 * static void
 * _nsc_relink_fd (nsc_fd_t *fd, nsc_fd_t **from,
 *			nsc_fd_t **to, nsc_iodev_t *iodev)
 *	Relink file descriptor.
 *
 * Description:
 *	Remove the file descriptor from the 'from' chain and
 *	add it to the 'to' chain.  The busy flag in iodev is
 *	used to prevent modifications to the chain whilst a
 *	callback is in progress.
 */
static void
_nsc_relink_fd(nsc_fd_t *fd, nsc_fd_t **from, nsc_fd_t **to, nsc_iodev_t *iodev)
{
	nsc_dev_t *dev = fd->sf_dev;
	nsc_fd_t **fdp;

	mutex_enter(&dev->nsc_lock);

	while (iodev->si_busy)
		(void) _nsc_wait_dev(dev, 0);

	for (fdp = from; *fdp; fdp = &(*fdp)->sf_next)
		if (*fdp == fd) {
			*fdp = fd->sf_next;
			break;
		}

	fd->sf_next = (*to);
	(*to) = fd;

	mutex_exit(&dev->nsc_lock);
}


/*
 * static int
 * _nsc_alloc_iodev (nsc_dev_t *dev, int type, nsc_iodev_t **iodevp)
 *	Allocate I/O device structure.
 *
 * Calling/Exit State:
 *	Stores address of I/O device structure through iodevp
 *	and returns 0 on success, otherwise returns error code.
 *
 * Description:
 *	If an entry for the I/O device already exists increment
 *	the reference count and return the address, otherwise
 *	allocate a new structure.
 *
 *	A new structure is allocated before scanning the chain
 *	to avoid calling the memory allocator with a spin lock
 *	held.  If an entry is found the new structure is free'd.
 *
 *	The I/O device chain is protected by the device lock.
 */
static int
_nsc_alloc_iodev(dev, type, iodevp)
nsc_dev_t *dev;
int type;
nsc_iodev_t **iodevp;
{
	nsc_iodev_t *iodev, *ip;
	nsc_io_t *io;

	if (!(iodev = (nsc_iodev_t *)nsc_kmem_zalloc(
	    sizeof (*iodev), KM_SLEEP, _nsc_local_mem)))
		return (ENOMEM);

	mutex_init(&iodev->si_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&iodev->si_cv, NULL, CV_DRIVER, NULL);

	if (!(io = _nsc_reserve_io(dev->nsc_path, type))) {
		mutex_destroy(&iodev->si_lock);
		cv_destroy(&iodev->si_cv);
		nsc_kmem_free(iodev, sizeof (*iodev));
		return (ENXIO);
	}

	iodev->si_refcnt++;
	iodev->si_io = io;
	iodev->si_dev = dev;

	mutex_enter(&_nsc_io_lock);
	dev->nsc_refcnt++;
	mutex_exit(&_nsc_io_lock);

	mutex_enter(&dev->nsc_lock);

	for (ip = dev->nsc_list; ip; ip = ip->si_next)
		if (ip->si_io == io) {
			ip->si_refcnt++;
			break;
		}

	if (!ip) {
		iodev->si_next = dev->nsc_list;
		dev->nsc_list = iodev;
	}

	mutex_exit(&dev->nsc_lock);

	if (ip) {
		_nsc_free_iodev(iodev);
		iodev = ip;
	}

	*iodevp = iodev;
	return (0);
}


/*
 * static void
 * _nsc_free_iodev (nsc_iodev_t *iodev)
 *	Free I/O device structure.
 *
 * Description:
 *	Decrements the reference count of a previously allocated
 *	I/O device structure.  If this is the last reference it
 *	is removed from the device chain and free'd once pending
 *	activity has completed.
 */
static void
_nsc_free_iodev(nsc_iodev_t *iodev)
{
	nsc_iodev_t **ipp;
	nsc_dev_t *dev;

	if (!iodev)
		return;

	dev = iodev->si_dev;

	mutex_enter(&dev->nsc_lock);

	if (--iodev->si_refcnt > 0) {
		mutex_exit(&dev->nsc_lock);
		return;
	}

	for (ipp = &dev->nsc_list; *ipp; ipp = &(*ipp)->si_next)
		if (*ipp == iodev) {
			*ipp = iodev->si_next;
			break;
		}

	if (dev->nsc_wait || dev->nsc_refcnt <= 0)
		cv_broadcast(&dev->nsc_cv);

	while (iodev->si_pend || iodev->si_rpend || iodev->si_busy)
		(void) _nsc_wait_dev(dev, 0);

	mutex_exit(&dev->nsc_lock);

	_nsc_release_io(iodev->si_io);
	_nsc_free_dev(dev);

	mutex_destroy(&iodev->si_lock);
	cv_destroy(&iodev->si_cv);

	nsc_kmem_free(iodev, sizeof (*iodev));
}


/*
 * static int
 * _nsc_alloc_dev (char *path, nsc_dev_t **devp)
 *	Allocate device structure.
 *
 * Calling/Exit State:
 *	Stores address of device structure through devp
 *	and returns 0 on success, otherwise returns error
 *	code.
 *
 * Description:
 *	If an entry for the device already exists increment
 *	the reference count and return the address, otherwise
 *	allocate a new structure.
 *
 *	A new structure is allocated before scanning the device
 *	chain to avoid calling the memory allocator with a spin
 *	lock held.  If the device is found the new structure is
 *	free'd.
 *
 *	The device chain is protected by _nsc_io_lock.
 */
static int
_nsc_alloc_dev(char *path, nsc_dev_t **devp)
{
	nsc_dev_t *dev, *dp, **ddp;
	nsc_devval_t *dv;
	nsc_rval_t *rval;
	ncall_t *ncall;
	int rc;

	if (!(dev = (nsc_dev_t *)nsc_kmem_zalloc(
	    sizeof (*dev), KM_SLEEP, _nsc_local_mem)))
		return (ENOMEM);

	dev->nsc_refcnt++;

	mutex_init(&dev->nsc_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dev->nsc_cv, NULL, CV_DRIVER, NULL);

	dev->nsc_phash = nsc_strhash(path);
	dev->nsc_path = nsc_strdup(path);

	mutex_enter(&_nsc_io_lock);

	dev->nsc_next = _nsc_dev_pend;
	_nsc_dev_pend = dev;

	mutex_exit(&_nsc_io_lock);

	mutex_enter(&_nsc_io_lock);

	for (dp = _nsc_dev_top; dp; dp = dp->nsc_next)
		if (dp->nsc_phash == dev->nsc_phash &&
		    strcmp(dp->nsc_path, dev->nsc_path) == 0) {
			dp->nsc_refcnt++;
			break;
		}

	if (!dp) {
		for (ddp = &_nsc_dev_pend; *ddp; ddp = &(*ddp)->nsc_next)
			if (*ddp == dev) {
				*ddp = dev->nsc_next;
				break;
			}

		dev->nsc_next = _nsc_dev_top;
		_nsc_dev_top = dev;
	}

	mutex_exit(&_nsc_io_lock);

	if (dp) {
		_nsc_free_dev(dev);
		dev = dp;
	}

	/*
	 * Try and find the device/values header for this device
	 * and link it back to the device structure.
	 */

	mutex_enter(&_nsc_devval_lock);

	if (dev->nsc_values == NULL) {
		for (dv = _nsc_devval_top; dv; dv = dv->dv_next) {
			if (dv->dv_phash == dev->nsc_phash &&
			    strcmp(dv->dv_path, dev->nsc_path) == 0) {
				dev->nsc_values = dv;
				break;
			}
		}
	}

	mutex_exit(&_nsc_devval_lock);

	/*
	 * Refresh the device/values from the other node
	 */

	rval = nsc_kmem_zalloc(sizeof (*rval), KM_SLEEP, _nsc_local_mem);
	if (rval == NULL) {
		goto out;
	}

	rc = ncall_alloc(ncall_mirror(ncall_self()), 0, 0, &ncall);
	if (rc == 0) {
		(void) strncpy(rval->path, path, sizeof (rval->path));

		rc = ncall_put_data(ncall, rval, sizeof (*rval));
		if (rc == 0) {
			/*
			 * Send synchronously and read a reply
			 * so that we know that the updates
			 * have completed before this
			 * function returns.
			 */
			if (ncall_send(ncall, 0, NSC_SETVAL_ALL) == 0)
				(void) ncall_read_reply(ncall, 1, &rc);
		}

		ncall_free(ncall);
	}

	nsc_kmem_free(rval, sizeof (*rval));

out:
	*devp = dev;
	return (0);
}


/*
 * static void
 * _nsc_free_dev (nsc_dev_t *dev)
 *	Free device structure.
 *
 * Description:
 *	Decrements the reference count of a previously allocated
 *	device structure.  If this is the last reference it is
 *	removed from the device chain and free'd once pending
 *	activity has completed.
 *
 *	Whilst waiting for pending activity to cease the device is
 *	relinked onto the pending chain.
 */
static void
_nsc_free_dev(dev)
nsc_dev_t *dev;
{
	nsc_dev_t **ddp;

	if (!dev)
		return;

	mutex_enter(&_nsc_io_lock);

	if (--dev->nsc_refcnt > 0) {
		mutex_exit(&_nsc_io_lock);
		return;
	}

	for (ddp = &_nsc_dev_top; *ddp; ddp = &(*ddp)->nsc_next)
		if (*ddp == dev) {
			*ddp = dev->nsc_next;
			dev->nsc_next = _nsc_dev_pend;
			_nsc_dev_pend = dev;
			break;
		}

	mutex_exit(&_nsc_io_lock);

	mutex_enter(&dev->nsc_lock);

	while (dev->nsc_pend || dev->nsc_rpend || dev->nsc_wait) {
		cv_wait(&dev->nsc_cv, &dev->nsc_lock);
	}

	mutex_exit(&dev->nsc_lock);

	mutex_enter(&_nsc_io_lock);

	for (ddp = &_nsc_dev_pend; *ddp; ddp = &(*ddp)->nsc_next)
		if (*ddp == dev) {
			*ddp = dev->nsc_next;
			break;
		}

	mutex_exit(&_nsc_io_lock);

	mutex_destroy(&dev->nsc_lock);
	cv_destroy(&dev->nsc_cv);
	nsc_strfree(dev->nsc_path);

	nsc_kmem_free(dev, sizeof (*dev));
}


/*
 * static nsc_io_t *
 * _nsc_alloc_io (int id, char *name, int flag)
 *	Allocate an I/O structure.
 *
 * Calling/Exit State:
 *	Returns the address of the I/O structure, or NULL.
 */
static nsc_io_t *
_nsc_alloc_io(id, name, flag)
int id;
char *name;
int flag;
{
	nsc_io_t *io;

	if (!(io = (nsc_io_t *)nsc_kmem_zalloc(
	    sizeof (*io), KM_NOSLEEP, _nsc_local_mem)))
		return (NULL);

	cv_init(&io->cv, NULL, CV_DRIVER, NULL);

	io->id = id;
	io->name = name;
	io->flag = flag;

	return (io);
}


/*
 * static void
 * _nsc_free_io (nsc_io_t *io)
 *	Free an I/O structure.
 *
 * Calling/Exit State:
 *	Free the I/O structure and remove it from the chain.
 */
static void
_nsc_free_io(io)
nsc_io_t *io;
{
	nsc_io_t **iop;

	mutex_enter(&_nsc_io_lock);

	for (iop = &_nsc_io_top; *iop; iop = &(*iop)->next)
		if (*iop == io)
			break;

	if (*iop)
		(*iop) = io->next;

	mutex_exit(&_nsc_io_lock);

	cv_destroy(&io->cv);
	nsc_kmem_free(io, sizeof (*io));
}