402 kmutex_t *);
403 static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
404 kmutex_t *);
405
406 /*
407 * Bump this number when you alter the zone syscall interfaces; this is
408 * because we need to have support for previous API versions in libc
409 * to support patching; libc calls into the kernel to determine this number.
410 *
411 * Version 1 of the API is the version originally shipped with Solaris 10
412 * Version 2 alters the zone_create system call in order to support more
413 * arguments by moving the args into a structure; and to do better
414 * error reporting when zone_create() fails.
415 * Version 3 alters the zone_create system call in order to support the
416 * import of ZFS datasets to zones.
417 * Version 4 alters the zone_create system call in order to support
418 * Trusted Extensions.
419 * Version 5 alters the zone_boot system call, and converts its old
420 * bootargs parameter to be set by the zone_setattr API instead.
421 * Version 6 adds the flag argument to zone_create.
422 */
423 static const int ZONE_SYSCALL_API_VERSION = 6;
424
425 /*
426 * Certain filesystems (such as NFS and autofs) need to know which zone
427 * the mount is being placed in. Because of this, we need to be able to
428 * ensure that a zone isn't in the process of being created/destroyed such
429 * that nfs_mount() thinks it is in the global/NGZ zone, while by the time
430 * it gets added to the list of mounted zones, it ends up on the wrong zone's
431 * mount list. Since a zone can't reside on an NFS file system, we don't
432 * have to worry about the zonepath itself.
433 *
434 * The following functions: block_mounts()/resume_mounts() and
435 * mount_in_progress()/mount_completed() are used by zones and the VFS
436 * layer (respectively) to synchronize zone state transitions and new
437 * mounts within a zone. This synchronization is on a per-zone basis, so
438 * activity for one zone will not interfere with activity for another zone.
439 *
440 * The semantics are like a reader-reader lock such that there may
441 * either be multiple mounts (or zone state transitions, if that weren't
442 * serialized by zonehash_lock) in progress at the same time, but not
443 * both.
3003 }
3004
3005 ASSERT(refcnt == 0);
3006 /*
3007 * zsched has exited; the zone is dead.
3008 */
3009 zone->zone_zsched = NULL; /* paranoia */
3010 mutex_enter(&zone_status_lock);
3011 zone_status_set(zone, ZONE_IS_DEAD);
3012 out:
3013 mutex_exit(&zone_status_lock);
3014 zone_rele(zone);
3015 }
3016
3017 zoneid_t
3018 getzoneid(void)
3019 {
3020 return (curproc->p_zone->zone_id);
3021 }
3022
3023 /*
3024 * Internal versions of zone_find_by_*(). These don't zone_hold() or
3025 * check the validity of a zone's state.
3026 */
3027 static zone_t *
3028 zone_find_all_by_id(zoneid_t zoneid)
3029 {
3030 mod_hash_val_t hv;
3031 zone_t *zone = NULL;
3032
3033 ASSERT(MUTEX_HELD(&zonehash_lock));
3034
3035 if (mod_hash_find(zonehashbyid,
3036 (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
3037 zone = (zone_t *)hv;
3038 return (zone);
3039 }
3040
3041 static zone_t *
3042 zone_find_all_by_label(const ts_label_t *label)
4385
4386 kmem_free(kbuf, buflen);
4387 return (0);
4388 }
4389
4390 /*
4391 * System call to create/initialize a new zone named 'zone_name', rooted
4392 * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
4393 * and initialized with the zone-wide rctls described in 'rctlbuf', and
4394 * with labeling set by 'match', 'doi', and 'label'.
4395 *
4396 * If extended error is non-null, we may use it to return more detailed
4397 * error information.
4398 */
4399 static zoneid_t
4400 zone_create(const char *zone_name, const char *zone_root,
4401 const priv_set_t *zone_privs, size_t zone_privssz,
4402 caddr_t rctlbuf, size_t rctlbufsz,
4403 caddr_t zfsbuf, size_t zfsbufsz, int *extended_error,
4404 int match, uint32_t doi, const bslabel_t *label,
4405 int flags)
4406 {
4407 struct zsched_arg zarg;
4408 nvlist_t *rctls = NULL;
4409 proc_t *pp = curproc;
4410 zone_t *zone, *ztmp;
4411 zoneid_t zoneid, start = GLOBAL_ZONEID;
4412 int error;
4413 int error2 = 0;
4414 char *str;
4415 cred_t *zkcr;
4416 boolean_t insert_label_hash;
4417
4418 if (secpolicy_zone_config(CRED()) != 0)
4419 return (set_errno(EPERM));
4420
4421 /* can't boot zone from within chroot environment */
4422 if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
4423 return (zone_create_error(ENOTSUP, ZE_CHROOTED,
4424 extended_error));
4425 /*
4426 * As the first step of zone creation, we want to allocate a zoneid.
4427 * This allocation is complicated by the fact that netstacks use the
4428 * zoneid to determine their stackid, but netstacks themselves are
4429 * freed asynchronously with respect to zone destruction. This means
4430 * that a netstack reference leak (or in principle, an extraordinarily
4431 * long netstack reference hold) could result in a zoneid being
4432 * allocated that in fact corresponds to a stackid from an active
4433 * (referenced) netstack -- unleashing all sorts of havoc when that
4434 * netstack is actually (re)used. (In the abstract, we might wish a
4435 * zoneid to not be deallocated until its last referencing netstack
4436 * has been released, but netstacks lack a backpointer into their
4437 * referencing zone -- and changing them to have such a pointer would
4438 * be substantial, to put it euphemistically.) To avoid this, we
4439 * detect this condition on allocation: if we have allocated a zoneid
4440 * that corresponds to a netstack that's still in use, we warn about
4441 * it (as it is much more likely to be a reference leak than an actual
4442 * netstack reference), free it, and allocate another. That these
4443 * identifiers are allocated out of an ID space assures that we won't
4444 * see the identifier we just allocated.
4456 } else if (zoneid == start) {
4457 /*
4458 * We have managed to iterate over the entire available
4459 * zoneid space -- there are no identifiers available,
4460 * presumably due to some number of leaked netstack
4461 * references. While it's in principle possible for us
4462 * to continue to try, it seems wiser to give up at
4463 * this point to warn and fail explicitly with a
4464 * distinctive error.
4465 */
4466 cmn_err(CE_WARN, "zone_create() failed: all available "
4467 "zone IDs have netstacks still in use");
4468 return (set_errno(ENFILE));
4469 }
4470
4471 cmn_err(CE_WARN, "unable to reuse zone ID %d; "
4472 "netstack still in use", zoneid);
4473 }
4474
4475 zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
4476 zone->zone_id = zoneid;
4477 zone->zone_status = ZONE_IS_UNINITIALIZED;
4478 zone->zone_pool = pool_default;
4479 zone->zone_pool_mod = gethrtime();
4480 zone->zone_psetid = ZONE_PS_INVAL;
4481 zone->zone_ncpus = 0;
4482 zone->zone_ncpus_online = 0;
4483 zone->zone_restart_init = B_TRUE;
4484 zone->zone_brand = &native_brand;
4485 zone->zone_initname = NULL;
4486 mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
4487 mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
4488 mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
4489 cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
4490 list_create(&zone->zone_ref_list, sizeof (zone_ref_t),
4491 offsetof(zone_ref_t, zref_linkage));
4492 list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
4493 offsetof(struct zsd_entry, zsd_linkage));
4494 list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
4495 offsetof(zone_dataset_t, zd_linkage));
4496 list_create(&zone->zone_dl_list, sizeof (zone_dl_t),
5660 case ZONE_ATTR_SECFLAGS:
5661 size = sizeof (zone->zone_secflags);
5662 if (bufsize > size)
5663 bufsize = size;
5664 if ((err = copyout(&zone->zone_secflags, buf, bufsize)) != 0)
5665 error = EFAULT;
5666 break;
5667 case ZONE_ATTR_NETWORK:
5668 bufsize = MIN(bufsize, PIPE_BUF + sizeof (zone_net_data_t));
5669 size = bufsize;
5670 zbuf = kmem_alloc(bufsize, KM_SLEEP);
5671 if (copyin(buf, zbuf, bufsize) != 0) {
5672 error = EFAULT;
5673 } else {
5674 error = zone_get_network(zoneid, zbuf);
5675 if (error == 0 && copyout(zbuf, buf, bufsize) != 0)
5676 error = EFAULT;
5677 }
5678 kmem_free(zbuf, bufsize);
5679 break;
5680 default:
5681 if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone)) {
5682 size = bufsize;
5683 error = ZBROP(zone)->b_getattr(zone, attr, buf, &size);
5684 } else {
5685 error = EINVAL;
5686 }
5687 }
5688 zone_rele(zone);
5689
5690 if (error)
5691 return (set_errno(error));
5692 return ((ssize_t)size);
5693 }
5694
5695 /*
5696 * Systemcall entry point for zone_setattr(2).
5697 */
5698 /*ARGSUSED*/
5699 static int
6464 return (set_errno(EFAULT));
6465 }
6466 zs.zone_name =
6467 (const char *)(unsigned long)zs32.zone_name;
6468 zs.zone_root =
6469 (const char *)(unsigned long)zs32.zone_root;
6470 zs.zone_privs =
6471 (const struct priv_set *)
6472 (unsigned long)zs32.zone_privs;
6473 zs.zone_privssz = zs32.zone_privssz;
6474 zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
6475 zs.rctlbufsz = zs32.rctlbufsz;
6476 zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
6477 zs.zfsbufsz = zs32.zfsbufsz;
6478 zs.extended_error =
6479 (int *)(unsigned long)zs32.extended_error;
6480 zs.match = zs32.match;
6481 zs.doi = zs32.doi;
6482 zs.label = (const bslabel_t *)(uintptr_t)zs32.label;
6483 zs.flags = zs32.flags;
6484 #else
6485 panic("get_udatamodel() returned bogus result\n");
6486 #endif
6487 }
6488
6489 return (zone_create(zs.zone_name, zs.zone_root,
6490 zs.zone_privs, zs.zone_privssz,
6491 (caddr_t)zs.rctlbuf, zs.rctlbufsz,
6492 (caddr_t)zs.zfsbuf, zs.zfsbufsz,
6493 zs.extended_error, zs.match, zs.doi,
6494 zs.label, zs.flags));
6495 case ZONE_BOOT:
6496 return (zone_boot((zoneid_t)(uintptr_t)arg1));
6497 case ZONE_DESTROY:
6498 return (zone_destroy((zoneid_t)(uintptr_t)arg1));
6499 case ZONE_GETATTR:
6500 return (zone_getattr((zoneid_t)(uintptr_t)arg1,
6501 (int)(uintptr_t)arg2, arg3, (size_t)arg4));
6502 case ZONE_SETATTR:
6503 return (zone_setattr((zoneid_t)(uintptr_t)arg1,
6504 (int)(uintptr_t)arg2, arg3, (size_t)arg4));
6505 case ZONE_ENTER:
6506 return (zone_enter((zoneid_t)(uintptr_t)arg1));
6507 case ZONE_LIST:
6508 return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
6509 case ZONE_SHUTDOWN:
6510 return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
6511 case ZONE_LOOKUP:
6512 return (zone_lookup((const char *)arg1));
6513 case ZONE_VERSION:
6514 return (zone_version((int *)arg1));
|
402 kmutex_t *);
403 static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
404 kmutex_t *);
405
406 /*
407 * Bump this number when you alter the zone syscall interfaces; this is
408 * because we need to have support for previous API versions in libc
409 * to support patching; libc calls into the kernel to determine this number.
410 *
411 * Version 1 of the API is the version originally shipped with Solaris 10
412 * Version 2 alters the zone_create system call in order to support more
413 * arguments by moving the args into a structure; and to do better
414 * error reporting when zone_create() fails.
415 * Version 3 alters the zone_create system call in order to support the
416 * import of ZFS datasets to zones.
417 * Version 4 alters the zone_create system call in order to support
418 * Trusted Extensions.
419 * Version 5 alters the zone_boot system call, and converts its old
420 * bootargs parameter to be set by the zone_setattr API instead.
421 * Version 6 adds the flag argument to zone_create.
422 * Version 7 adds the requested zone_did to zone_create.
423 */
424 static const int ZONE_SYSCALL_API_VERSION = 7;
425
426 /*
427 * Certain filesystems (such as NFS and autofs) need to know which zone
428 * the mount is being placed in. Because of this, we need to be able to
429 * ensure that a zone isn't in the process of being created/destroyed such
430 * that nfs_mount() thinks it is in the global/NGZ zone, while by the time
431 * it gets added to the list of mounted zones, it ends up on the wrong zone's
432 * mount list. Since a zone can't reside on an NFS file system, we don't
433 * have to worry about the zonepath itself.
434 *
435 * The following functions: block_mounts()/resume_mounts() and
436 * mount_in_progress()/mount_completed() are used by zones and the VFS
437 * layer (respectively) to synchronize zone state transitions and new
438 * mounts within a zone. This synchronization is on a per-zone basis, so
439 * activity for one zone will not interfere with activity for another zone.
440 *
441 * The semantics are like a reader-reader lock such that there may
442 * either be multiple mounts (or zone state transitions, if that weren't
443 * serialized by zonehash_lock) in progress at the same time, but not
444 * both.
3004 }
3005
3006 ASSERT(refcnt == 0);
3007 /*
3008 * zsched has exited; the zone is dead.
3009 */
3010 zone->zone_zsched = NULL; /* paranoia */
3011 mutex_enter(&zone_status_lock);
3012 zone_status_set(zone, ZONE_IS_DEAD);
3013 out:
3014 mutex_exit(&zone_status_lock);
3015 zone_rele(zone);
3016 }
3017
3018 zoneid_t
3019 getzoneid(void)
3020 {
3021 return (curproc->p_zone->zone_id);
3022 }
3023
3024 zoneid_t
3025 getzonedid(void)
3026 {
3027 return (curproc->p_zone->zone_did);
3028 }
3029
3030 /*
3031 * Internal versions of zone_find_by_*(). These don't zone_hold() or
3032 * check the validity of a zone's state.
3033 */
3034 static zone_t *
3035 zone_find_all_by_id(zoneid_t zoneid)
3036 {
3037 mod_hash_val_t hv;
3038 zone_t *zone = NULL;
3039
3040 ASSERT(MUTEX_HELD(&zonehash_lock));
3041
3042 if (mod_hash_find(zonehashbyid,
3043 (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
3044 zone = (zone_t *)hv;
3045 return (zone);
3046 }
3047
3048 static zone_t *
3049 zone_find_all_by_label(const ts_label_t *label)
4392
4393 kmem_free(kbuf, buflen);
4394 return (0);
4395 }
4396
4397 /*
4398 * System call to create/initialize a new zone named 'zone_name', rooted
4399 * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
4400 * and initialized with the zone-wide rctls described in 'rctlbuf', and
4401 * with labeling set by 'match', 'doi', and 'label'.
4402 *
4403 * If extended error is non-null, we may use it to return more detailed
4404 * error information.
4405 */
4406 static zoneid_t
4407 zone_create(const char *zone_name, const char *zone_root,
4408 const priv_set_t *zone_privs, size_t zone_privssz,
4409 caddr_t rctlbuf, size_t rctlbufsz,
4410 caddr_t zfsbuf, size_t zfsbufsz, int *extended_error,
4411 int match, uint32_t doi, const bslabel_t *label,
4412 int flags, zoneid_t zone_did)
4413 {
4414 struct zsched_arg zarg;
4415 nvlist_t *rctls = NULL;
4416 proc_t *pp = curproc;
4417 zone_t *zone, *ztmp;
4418 zoneid_t zoneid, start = GLOBAL_ZONEID;
4419 int error;
4420 int error2 = 0;
4421 char *str;
4422 cred_t *zkcr;
4423 boolean_t insert_label_hash;
4424
4425 if (secpolicy_zone_config(CRED()) != 0)
4426 return (set_errno(EPERM));
4427
4428 /* can't boot zone from within chroot environment */
4429 if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
4430 return (zone_create_error(ENOTSUP, ZE_CHROOTED,
4431 extended_error));
4432
4433 /*
4434 * As the first step of zone creation, we want to allocate a zoneid.
4435 * This allocation is complicated by the fact that netstacks use the
4436 * zoneid to determine their stackid, but netstacks themselves are
4437 * freed asynchronously with respect to zone destruction. This means
4438 * that a netstack reference leak (or in principle, an extraordinarily
4439 * long netstack reference hold) could result in a zoneid being
4440 * allocated that in fact corresponds to a stackid from an active
4441 * (referenced) netstack -- unleashing all sorts of havoc when that
4442 * netstack is actually (re)used. (In the abstract, we might wish a
4443 * zoneid to not be deallocated until its last referencing netstack
4444 * has been released, but netstacks lack a backpointer into their
4445 * referencing zone -- and changing them to have such a pointer would
4446 * be substantial, to put it euphemistically.) To avoid this, we
4447 * detect this condition on allocation: if we have allocated a zoneid
4448 * that corresponds to a netstack that's still in use, we warn about
4449 * it (as it is much more likely to be a reference leak than an actual
4450 * netstack reference), free it, and allocate another. That these
4451 * identifiers are allocated out of an ID space assures that we won't
4452 * see the identifier we just allocated.
4464 } else if (zoneid == start) {
4465 /*
4466 * We have managed to iterate over the entire available
4467 * zoneid space -- there are no identifiers available,
4468 * presumably due to some number of leaked netstack
4469 * references. While it's in principle possible for us
4470 * to continue to try, it seems wiser to give up at
4471 * this point to warn and fail explicitly with a
4472 * distinctive error.
4473 */
4474 cmn_err(CE_WARN, "zone_create() failed: all available "
4475 "zone IDs have netstacks still in use");
4476 return (set_errno(ENFILE));
4477 }
4478
4479 cmn_err(CE_WARN, "unable to reuse zone ID %d; "
4480 "netstack still in use", zoneid);
4481 }
4482
4483 zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
4484
4485 zone->zone_id = zoneid;
4486 zone->zone_did = zone_did;
4487 zone->zone_status = ZONE_IS_UNINITIALIZED;
4488 zone->zone_pool = pool_default;
4489 zone->zone_pool_mod = gethrtime();
4490 zone->zone_psetid = ZONE_PS_INVAL;
4491 zone->zone_ncpus = 0;
4492 zone->zone_ncpus_online = 0;
4493 zone->zone_restart_init = B_TRUE;
4494 zone->zone_brand = &native_brand;
4495 zone->zone_initname = NULL;
4496 mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
4497 mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
4498 mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
4499 cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
4500 list_create(&zone->zone_ref_list, sizeof (zone_ref_t),
4501 offsetof(zone_ref_t, zref_linkage));
4502 list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
4503 offsetof(struct zsd_entry, zsd_linkage));
4504 list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
4505 offsetof(zone_dataset_t, zd_linkage));
4506 list_create(&zone->zone_dl_list, sizeof (zone_dl_t),
5670 case ZONE_ATTR_SECFLAGS:
5671 size = sizeof (zone->zone_secflags);
5672 if (bufsize > size)
5673 bufsize = size;
5674 if ((err = copyout(&zone->zone_secflags, buf, bufsize)) != 0)
5675 error = EFAULT;
5676 break;
5677 case ZONE_ATTR_NETWORK:
5678 bufsize = MIN(bufsize, PIPE_BUF + sizeof (zone_net_data_t));
5679 size = bufsize;
5680 zbuf = kmem_alloc(bufsize, KM_SLEEP);
5681 if (copyin(buf, zbuf, bufsize) != 0) {
5682 error = EFAULT;
5683 } else {
5684 error = zone_get_network(zoneid, zbuf);
5685 if (error == 0 && copyout(zbuf, buf, bufsize) != 0)
5686 error = EFAULT;
5687 }
5688 kmem_free(zbuf, bufsize);
5689 break;
5690 case ZONE_ATTR_DID:
5691 size = sizeof (zoneid_t);
5692 if (bufsize > size)
5693 bufsize = size;
5694
5695 if (buf != NULL && copyout(&zone->zone_did, buf, bufsize) != 0)
5696 error = EFAULT;
5697 break;
5698 default:
5699 if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone)) {
5700 size = bufsize;
5701 error = ZBROP(zone)->b_getattr(zone, attr, buf, &size);
5702 } else {
5703 error = EINVAL;
5704 }
5705 }
5706 zone_rele(zone);
5707
5708 if (error)
5709 return (set_errno(error));
5710 return ((ssize_t)size);
5711 }
5712
5713 /*
5714 * Systemcall entry point for zone_setattr(2).
5715 */
5716 /*ARGSUSED*/
5717 static int
6482 return (set_errno(EFAULT));
6483 }
6484 zs.zone_name =
6485 (const char *)(unsigned long)zs32.zone_name;
6486 zs.zone_root =
6487 (const char *)(unsigned long)zs32.zone_root;
6488 zs.zone_privs =
6489 (const struct priv_set *)
6490 (unsigned long)zs32.zone_privs;
6491 zs.zone_privssz = zs32.zone_privssz;
6492 zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
6493 zs.rctlbufsz = zs32.rctlbufsz;
6494 zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
6495 zs.zfsbufsz = zs32.zfsbufsz;
6496 zs.extended_error =
6497 (int *)(unsigned long)zs32.extended_error;
6498 zs.match = zs32.match;
6499 zs.doi = zs32.doi;
6500 zs.label = (const bslabel_t *)(uintptr_t)zs32.label;
6501 zs.flags = zs32.flags;
6502 zs.zone_did = zs32.zone_did;
6503 #else
6504 panic("get_udatamodel() returned bogus result\n");
6505 #endif
6506 }
6507
6508 return (zone_create(zs.zone_name, zs.zone_root,
6509 zs.zone_privs, zs.zone_privssz,
6510 (caddr_t)zs.rctlbuf, zs.rctlbufsz,
6511 (caddr_t)zs.zfsbuf, zs.zfsbufsz,
6512 zs.extended_error, zs.match, zs.doi,
6513 zs.label, zs.flags, zs.zone_did));
6514 case ZONE_BOOT:
6515 return (zone_boot((zoneid_t)(uintptr_t)arg1));
6516 case ZONE_DESTROY:
6517 return (zone_destroy((zoneid_t)(uintptr_t)arg1));
6518 case ZONE_GETATTR:
6519 return (zone_getattr((zoneid_t)(uintptr_t)arg1,
6520 (int)(uintptr_t)arg2, arg3, (size_t)arg4));
6521 case ZONE_SETATTR:
6522 return (zone_setattr((zoneid_t)(uintptr_t)arg1,
6523 (int)(uintptr_t)arg2, arg3, (size_t)arg4));
6524 case ZONE_ENTER:
6525 return (zone_enter((zoneid_t)(uintptr_t)arg1));
6526 case ZONE_LIST:
6527 return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
6528 case ZONE_SHUTDOWN:
6529 return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
6530 case ZONE_LOOKUP:
6531 return (zone_lookup((const char *)arg1));
6532 case ZONE_VERSION:
6533 return (zone_version((int *)arg1));
|