/*
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Copyright 2012 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * NFS Lock Manager (NLM) private declarations, etc.
 *
 * Source code derived from FreeBSD nlm.h
 */

#ifndef _NLM_NLM_H_
#define _NLM_NLM_H_

#include <sys/cmn_err.h>
#include <sys/queue.h>
#include <sys/modhash.h>
#include <sys/avl.h>

#define RPC_MSGOUT(args...)     cmn_err(CE_NOTE, args)
#define NLM_ERR(...)            cmn_err(CE_NOTE, __VA_ARGS__)
#define NLM_WARN(...)           cmn_err(CE_WARN, __VA_ARGS__)

#ifndef SEEK_SET
#define SEEK_SET        0
#endif
#ifndef SEEK_CUR
#define SEEK_CUR        1
#endif
#ifndef SEEK_END
#define SEEK_END        2
#endif

/*
 * Maximum offset supported by NLM calls using the older
 * (32-bit) versions of the protocol.
 */
#define MAX_UOFF32      0xffffffffULL
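
/*
 * For example, a server handling a request from an NLM v1-v3 client
 * might verify that the 64-bit lock range fits the old protocol
 * before using it (an illustrative sketch, not the actual check
 * used by the implementation):
 *
 *      if (fl->l_start > MAX_UOFF32 ||
 *          (fl->l_len != 0 &&
 *          fl->l_start + fl->l_len - 1 > MAX_UOFF32))
 *              return (EINVAL);
 */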
  68 
  69 struct nlm_host;
  70 struct vnode;
  71 struct exportinfo;
  72 struct shrlock;
  73 struct _kthread;
  74 
/*
 * How to read the code: probably the best place to start is
 * the nlm_host structure, which is the central structure in
 * klmmod. nlm_host is closely tied to all other NLM structures.
 *
 * There are three major locks we use inside NLM:
 * 1) Global read-write lock (lm_lck) that is used to
 *    protect operations with sysid allocation and
 *    management of zone-global structures for each
 *    zone.
 * 2) Zone-global lock: (nlm_globals->lock) is a mutex
 *    used to protect all operations inside a particular
 *    zone.
 * 3) Host's lock: (nlm_host->nh_lock) is a per-host mutex
 *    used to protect the host's internal fields and all
 *    operations with the given host.
 *
 * Lock order _must_ obey the following scheme:
 *  lm_lck then nlm_globals->lock then nlm_host->nh_lock
 *
 * Locks:
 * (g)          locked by lm_lck
 * (z)          locked by nlm_globals->lock
 * (l)          locked by host->nh_lock
 * (c)          const until freeing
 */
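
/*
 * For example, code that must take both the zone-global lock and a
 * host's lock acquires them in that order and releases them in the
 * reverse order (an illustrative sketch only):
 *
 *      mutex_enter(&g->lock);
 *      mutex_enter(&hostp->nh_lock);
 *      ...work with the host...
 *      mutex_exit(&hostp->nh_lock);
 *      mutex_exit(&g->lock);
 */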

/*
 * Callback functions for nlm_do_lock() and others.
 *
 * Calls to nlm_do_lock are unusual because it needs to handle
 * the reply itself, instead of letting it happen the normal way.
 * It also needs to make an RPC call _back_ to the client when a
 * blocked lock request completes.
 *
 * We pass three callback functions to nlm_do_lock:
 *    nlm_reply_cb: send a normal RPC reply
 *      nlm_res_cb: do a _res (message style) RPC (call)
 * nlm_testargs_cb: do a "granted" RPC call (after blocking)
 * Only one of the first two is used; the third is used only
 * for blocking requests.
 *
 * We also use callback functions for all the _msg variants
 * of the NLM svc calls, where the reply is a reverse call.
 * The nlm_testres_cb is used by the _test_msg svc calls.
 * The nlm_res_cb type is used by the other _msg calls.
 */
typedef bool_t (*nlm_reply_cb)(SVCXPRT *, nlm4_res *);
typedef enum clnt_stat (*nlm_res_cb)(nlm4_res *, void *, CLIENT *);
typedef enum clnt_stat (*nlm_testargs_cb)(nlm4_testargs *, void *, CLIENT *);
typedef enum clnt_stat (*nlm_testres_cb)(nlm4_testres *, void *, CLIENT *);
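
/*
 * For example, a message-style NLM4_LOCK_MSG dispatch might invoke
 * nlm_do_lock() like this (an illustrative sketch; the callback
 * names stand for whatever reverse-call stubs the dispatcher
 * provides):
 *
 *      nlm_do_lock(argp, &resp, sr,
 *          NULL, lock_msg_res_cb, granted_msg_cb);
 *
 * Here the NULL nlm_reply_cb means no direct RPC reply is sent,
 * lock_msg_res_cb sends the message-style _res reply, and
 * granted_msg_cb issues the GRANTED call once a blocked request
 * completes.
 */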

/*
 * NLM sleeping lock request.
 *
 * Sleeping lock requests are server side only objects
 * that are created when a client asks the server to add
 * a new sleeping lock and this lock needs to block.
 * The server keeps track of these requests in order to be
 * able to cancel them or clean them up.
 *
 * Sleeping lock requests are closely tied to a particular
 * vnode or, strictly speaking, to the NLM vhold object that
 * holds the vnode.
 *
 * struct nlm_slreq:
 *   nsr_fl: information about the file lock
 *   nsr_link: a list node to store lock requests
 *             in the vhold object.
 */
struct nlm_slreq {
        struct flock64          nsr_fl;
        TAILQ_ENTRY(nlm_slreq)  nsr_link;
};
TAILQ_HEAD(nlm_slreq_list, nlm_slreq);
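
/*
 * For example, the server side of a blocking lock request might
 * record the pending request before waiting, and drop it afterwards
 * (an illustrative sketch; error handling omitted):
 *
 *      error = nlm_slreq_register(hostp, nvp, &fl);
 *      ...wait until the lock can be granted or is cancelled...
 *      (void) nlm_slreq_unregister(hostp, nvp, &fl);
 */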

/*
 * An NLM vhold object is a wrapper around a vnode that remote
 * clients have locked (or added a share reservation to)
 * on the NLM server. A vhold keeps its vnode held (by VN_HOLD())
 * while the vnode has any locks or shares made by the parent host.
 * Vholds are used for two purposes:
 * 1) Hold the vnode (with VN_HOLD) while it has any locks;
 * 2) Keep track of all vnodes the remote host touched
 *    with lock/share operations on the NLM server, so that NLM
 *    can know what vnodes are potentially locked;
 *
 * Vholds are used on the server side only. For the server side it's
 * really important to keep vnodes held while they potentially have
 * any locks/shares. In contrast, it's not important for the client
 * side at all. When a particular vnode comes to the NLM client side
 * code, it's already held (VN_HOLD) by the process calling the
 * lock/share function (it's referenced because the client calls
 * open() before making locks or shares).
 *
 * Each NLM host object has a collection of vholds associated
 * with vnodes the host touched earlier by adding locks or shares.
 * Having this collection allows us to decide if the host is still
 * in use. When it has any vhold objects it's considered to be
 * in use. Otherwise we're free to destroy it.
 *
 * Vholds are destroyed by the NLM garbage collector thread that
 * periodically checks whether they have any locks or shares.
 * The check occurs when the parent host has been untouched by the
 * client or server for some period of time.
 *
 * struct nlm_vhold:
 *   nv_vp: a pointer to the vnode that is held by the given nlm_vhold
 *   nv_refcnt: reference counter (non zero when vhold is in use)
 *   nv_slreqs: sleeping lock requests that were made on the nv_vp
 *   nv_link: list node to store vholds in the host's nh_vnodes_list
 */
struct nlm_vhold {
        vnode_t                 *nv_vp;    /* (c) */
        int                     nv_refcnt; /* (l) */
        struct nlm_slreq_list   nv_slreqs; /* (l) */
        TAILQ_ENTRY(nlm_vhold)  nv_link;   /* (l) */
};
TAILQ_HEAD(nlm_vhold_list, nlm_vhold);
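
/*
 * For example, server code that manipulates locks on behalf of a
 * remote host typically brackets the operation with a vhold
 * reference (an illustrative sketch):
 *
 *      nvp = nlm_vhold_get(hostp, vp);
 *      ...apply the lock/share to nvp->nv_vp...
 *      nlm_vhold_release(hostp, nvp);
 */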

/*
 * Client side sleeping lock state.
 * - NLM_SL_BLOCKED: some thread is blocked on this lock
 * - NLM_SL_GRANTED: server granted us the lock
 * - NLM_SL_CANCELLED: the lock is cancelled (i.e. invalid/inactive)
 */
typedef enum nlm_slock_state {
        NLM_SL_UNKNOWN = 0,
        NLM_SL_BLOCKED,
        NLM_SL_GRANTED,
        NLM_SL_CANCELLED
} nlm_slock_state_t;

/*
 * A client side sleeping lock request (set by F_SETLKW)
 * stored in the nlm_slocks collection of nlm_globals.
 *
 *  struct nlm_slock
 *   nsl_state: Sleeping lock state.
 *             (see nlm_slock_state for more information)
 *   nsl_cond: Condvar that is used when the sleeping lock
 *            needs to wait for a GRANT callback
 *            or cancellation event.
 *   nsl_lock: nlm4_lock structure that is sent to the server
 *   nsl_fh: Filehandle that corresponds to nsl_vp
 *   nsl_host: The host owning this sleeping lock
 *   nsl_vp: The vnode the sleeping lock is waiting on.
 *   nsl_link: A list node for the nlm_globals->nlm_slocks list.
 */
struct nlm_slock {
        nlm_slock_state_t       nsl_state; /* (z) */
        kcondvar_t              nsl_cond;  /* (z) */
        nlm4_lock               nsl_lock;  /* (c) */
        struct netobj           nsl_fh;    /* (c) */
        struct nlm_host         *nsl_host; /* (c) */
        struct vnode            *nsl_vp;   /* (c) */
        TAILQ_ENTRY(nlm_slock)  nsl_link;  /* (z) */
};
TAILQ_HEAD(nlm_slock_list, nlm_slock);
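
/*
 * For example, the client-side F_SETLKW path registers a sleeping
 * lock before sending the blocking LOCK request and then waits for
 * the GRANTED callback (an illustrative sketch; error handling,
 * retries, and the timeout value are omitted or assumed):
 *
 *      nslp = nlm_slock_register(g, hostp, &args.alock, vp);
 *      ...send the blocking NLM_LOCK request...
 *      error = nlm_slock_wait(g, nslp, timeo);
 *      nlm_slock_unregister(g, nslp);
 */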

/*
 * Share reservation description. NLM tracks all active
 * share reservations made by the client side, so that
 * they can be easily recovered if the remote NLM server
 * reboots. Share reservation tracking is also useful
 * when NLM needs to determine whether a host still owns
 * resources on the system and thus can't be destroyed.
 *
 * nlm_shres:
 *   ns_shr: share reservation description
 *   ns_vp: a pointer to the vnode the share reservation is made on
 *   ns_next: next nlm_shres instance (or NULL if the next item
 *            isn't present).
 */
struct nlm_shres {
        struct shrlock          *ns_shr;
        vnode_t                 *ns_vp;
        struct nlm_shres        *ns_next;
};
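
/*
 * For example, client-side recovery can walk a snapshot of the
 * active share reservations and re-send them to a rebooted server
 * (an illustrative sketch):
 *
 *      shlist = nlm_get_active_shres(hostp);
 *      for (nsp = shlist; nsp != NULL; nsp = nsp->ns_next)
 *              ...re-send SHARE for nsp->ns_shr on nsp->ns_vp...
 *      nlm_free_shrlist(shlist);
 */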

/*
 * NLM RPC handle object.
 *
 * In the kRPC subsystem it's unsafe for several threads to use
 * one RPC handle simultaneously. kRPC was designed so that each
 * thread has to create the RPC handle it'll use. RPC handle
 * creation can be a quite expensive operation, especially with
 * session oriented protocols (such as TCP) that must first
 * establish a session. The NLM RPC handle object is a wrapper
 * around a kRPC handle that can be cached and reused later.
 * We store all created RPC handles for a given host in the
 * host's RPC handle cache, so that threads making new requests
 * can simply take ready objects from the cache. That improves
 * NLM performance.
 *
 * nlm_rpc_t:
 *   nr_handle: the kRPC handle itself.
 *   nr_vers: the version of the NLM protocol the kRPC handle
 *            was created for.
 *   nr_link: a list node to store NLM RPC handles in the host's
 *            RPC handle cache.
 */
typedef struct nlm_rpc {
        CLIENT    *nr_handle;           /* (l) */
        rpcvers_t  nr_vers;             /* (c) */
        TAILQ_ENTRY(nlm_rpc) nr_link;   /* (l) */
} nlm_rpc_t;
TAILQ_HEAD(nlm_rpch_list, nlm_rpc);
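
/*
 * For example, a caller that needs to talk to a host borrows a
 * handle from the cache and returns it when done (an illustrative
 * sketch):
 *
 *      error = nlm_host_get_rpc(hostp, NLM4_VERS, &rpcp);
 *      if (error == 0) {
 *              stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);
 *              nlm_host_rele_rpc(hostp, rpcp);
 *      }
 */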

/*
 * Describes the state of an NLM host's RPC binding.
 * An RPC binding can be in one of three states:
 * 1) NRPCB_NEED_UPDATE:
 *    The binding is either not initialized or stale.
 * 2) NRPCB_UPDATE_INPROGRESS:
 *    When some thread updates the host's RPC binding,
 *    it sets the binding's state to NRPCB_UPDATE_INPROGRESS,
 *    which denotes that other threads must wait until
 *    the update is finished.
 * 3) NRPCB_UPDATED:
 *    Denotes that the host's RPC binding is both initialized
 *    and fresh.
 */
enum nlm_rpcb_state {
        NRPCB_NEED_UPDATE = 0,
        NRPCB_UPDATE_INPROGRESS,
        NRPCB_UPDATED
};
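
/*
 * For example, a thread that finds an update already in progress
 * typically waits on nh_rpcb_cv until the updating thread stores
 * the result and broadcasts (an illustrative sketch; assumes
 * nh_lock is held on entry):
 *
 *      while (hostp->nh_rpcb_state == NRPCB_UPDATE_INPROGRESS)
 *              cv_wait(&hostp->nh_rpcb_cv, &hostp->nh_lock);
 */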

/*
 * NLM host flags
 */
#define NLM_NH_MONITORED 0x01
#define NLM_NH_RECLAIM   0x02
#define NLM_NH_INIDLE    0x04
#define NLM_NH_SUSPEND   0x08

/*
 * The NLM host object is the central structure in NLM.
 * It identifies a remote client, a remote server, or both.
 * An NLM host object keeps track of all vnodes the client/server
 * has locked and all sleeping locks it has. All lock/unlock
 * operations are done using a host object.
 *
 * nlm_host:
 *   nh_lock: a mutex protecting host object fields
 *   nh_refs: reference counter. Identifies how many threads
 *            use this host object.
 *   nh_link: a list node for keeping the host in a zone-global list.
 *   nh_by_addr: an AVL tree node for keeping the host in a
 *              zone-global tree. The host can be looked up in the
 *              tree by its <netid, address> pair.
 *   nh_name: host name.
 *   nh_netid: netid string identifying the type of transport the
 *             host uses.
 *   nh_knc: host's knetconfig (used by the kRPC subsystem).
 *   nh_addr: host's address (either IPv4 or IPv6).
 *   nh_sysid: unique sysid associated with this host.
 *   nh_state: last seen host state reported by NSM.
 *   nh_flags: ORed host flags.
 *   nh_idle_timeout: host idle timeout. When it expires, the host
 *                    is freed.
 *   nh_recl_cv: condition variable used for reporting that the
 *               reclamation process is finished.
 *   nh_rpcb_cv: condition variable that is used to make sure
 *               that only one thread renews the host's RPC binding.
 *   nh_rpcb_ustat: error code returned by the RPC binding update
 *                  operation.
 *   nh_rpcb_state: host's RPC binding state (see enum nlm_rpcb_state
 *                  for more details).
 *   nh_rpchc: host's RPC handle cache.
 *   nh_vholds_by_vp: a hash table of all vholds the host owns
 *                    (used for lookup).
 *   nh_vholds_list: a linked list of all vholds the host owns
 *                   (used for iteration).
 *   nh_shrlist: a list of all active share reservations on the
 *               client side.
 *   nh_reclaimer: a pointer to the reclamation thread (kthread_t);
 *                 NULL if the reclamation thread doesn't exist.
 */
struct nlm_host {
        kmutex_t                nh_lock;                /* (c) */
        volatile uint_t         nh_refs;                /* (z) */
        TAILQ_ENTRY(nlm_host)   nh_link;                /* (z) */
        avl_node_t              nh_by_addr;             /* (z) */
        char                    *nh_name;               /* (c) */
        char                    *nh_netid;              /* (c) */
        struct knetconfig       nh_knc;                 /* (c) */
        struct netbuf           nh_addr;                /* (c) */
        sysid_t                 nh_sysid;               /* (c) */
        int32_t                 nh_state;               /* (z) */
        clock_t                 nh_idle_timeout;        /* (z) */
        uint8_t                 nh_flags;               /* (z) */
        kcondvar_t              nh_recl_cv;             /* (z) */
        kcondvar_t              nh_rpcb_cv;             /* (l) */
        enum clnt_stat          nh_rpcb_ustat;          /* (l) */
        enum nlm_rpcb_state     nh_rpcb_state;          /* (l) */
        struct nlm_rpch_list    nh_rpchc;               /* (l) */
        mod_hash_t              *nh_vholds_by_vp;       /* (l) */
        struct nlm_vhold_list   nh_vholds_list;         /* (l) */
        struct nlm_shres        *nh_shrlist;            /* (l) */
        kthread_t               *nh_reclaimer;          /* (l) */
};
TAILQ_HEAD(nlm_host_list, nlm_host);
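
/*
 * For example, typical consumers look a host up (which takes a
 * reference on it) and release it when done (an illustrative
 * sketch):
 *
 *      hostp = nlm_host_findcreate(g, name, netid, addr);
 *      if (hostp != NULL) {
 *              ...use hostp, e.g. for lock/unlock operations...
 *              nlm_host_release(g, hostp);
 *      }
 */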

/*
 * The nlm_nsm structure describes an RPC client handle that can be
 * used to communicate with the local NSM via kRPC.
 *
 * We need to wrap the handle in the nlm_nsm structure because kRPC
 * cannot share one handle between several threads. It's assumed
 * that NLM uses only one NSM handle per zone, thus all RPC
 * operations on the NSM's handle are serialized using the
 * nlm_nsm->ns_sem semaphore.
 *
 * struct nlm_nsm:
 *  ns_sem: a semaphore for serializing network operations to statd
 *  ns_knc: a knetconfig describing the transport that is used for
 *      communication
 *  ns_addr: the address of the local statd we're talking to
 *  ns_handle: an RPC handle used for talking to the local statd
 *      using the status monitor protocol (SM_PROG)
 *  ns_addr_handle: an RPC handle used for talking to the local statd
 *      using the address registration protocol (NSM_ADDR_PROGRAM)
 */
struct nlm_nsm {
        ksema_t                 ns_sem;
        struct knetconfig       ns_knc;          /* (c) */
        struct netbuf           ns_addr;         /* (c) */
        CLIENT                  *ns_handle;      /* (c) */
        CLIENT                  *ns_addr_handle; /* (c) */
};
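
/*
 * For example, any RPC to the local statd is bracketed by the
 * semaphore (an illustrative sketch; sm_mon_call() stands for
 * whatever client stub or clnt_call wrapper is actually used):
 *
 *      sema_p(&g->nlm_nsm.ns_sem);
 *      stat = sm_mon_call(&args, &res, g->nlm_nsm.ns_handle);
 *      sema_v(&g->nlm_nsm.ns_sem);
 */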

/*
 * Could use flock.h flk_nlm_status_t instead, but
 * prefer our own enum with initial zero...
 */
typedef enum {
        NLM_ST_DOWN = 0,
        NLM_ST_STOPPING,
        NLM_ST_UP,
        NLM_ST_STARTING
} nlm_run_status_t;

/*
 * The nlm_globals structure allows NLM to be zone aware. The
 * structure collects all "global variables" NLM has for each zone.
 *
 * struct nlm_globals:
 * lock: mutex protecting all operations inside the given zone
 * grace_threshold: grace period expiration time (in ticks)
 * lockd_pid: PID of the lockd user space daemon
 * run_status: run status of klmmod inside the given zone
 * nsm_state: state obtained from the local statd during klmmod startup
 * nlm_gc_thread: garbage collector thread
 * nlm_gc_sched_cv: condvar that can be signalled to wake up the GC
 * nlm_gc_finish_cv: condvar that is signalled just before the GC
 *                   thread exits
 * nlm_nsm: an object describing the RPC handle used for talking to
 *          the local statd
 * nlm_hosts_tree: an AVL tree of all hosts in the given zone
 *                 (used for host lookup by <netid, address> pair)
 * nlm_hosts_hash: a hash table of all hosts in the given zone
 *                 (used for host lookup by sysid)
 * nlm_idle_hosts: a list of all hosts that are in the idle state
 *                 (i.e. unused)
 * nlm_slocks: a list of all client-side sleeping locks in the zone
 * cn_idle_tmo: the value of the idle timeout (in seconds) obtained
 *              from lockd
 * grace_period: the value of the grace period (in seconds) obtained
 *               from lockd
 * retrans_tmo: the value of the retransmission timeout (in seconds)
 *              obtained from lockd.
 * clean_lock: mutex used to serialize clear_locks calls.
 * nlm_link: a list node used for keeping all nlm_globals objects
 *           in one global linked list.
 */
struct nlm_globals {
        kmutex_t                        lock;
        clock_t                         grace_threshold;        /* (z) */
        pid_t                           lockd_pid;              /* (z) */
        nlm_run_status_t                run_status;             /* (z) */
        int32_t                         nsm_state;              /* (z) */
        kthread_t                       *nlm_gc_thread;         /* (z) */
        kcondvar_t                      nlm_gc_sched_cv;        /* (z) */
        kcondvar_t                      nlm_gc_finish_cv;       /* (z) */
        struct nlm_nsm                  nlm_nsm;                /* (z) */
        avl_tree_t                      nlm_hosts_tree;         /* (z) */
        mod_hash_t                      *nlm_hosts_hash;        /* (z) */
        struct nlm_host_list            nlm_idle_hosts;         /* (z) */
        struct nlm_slock_list           nlm_slocks;             /* (z) */
        int                             cn_idle_tmo;            /* (z) */
        int                             grace_period;           /* (z) */
        int                             retrans_tmo;            /* (z) */
        kmutex_t                        clean_lock;             /* (c) */
        TAILQ_ENTRY(nlm_globals)        nlm_link;               /* (g) */
};
TAILQ_HEAD(nlm_globals_list, nlm_globals);
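
/*
 * For example, code running in a zone context typically obtains its
 * per-zone globals via the zone key declared below (an illustrative
 * sketch using the standard zone_getspecific() interface):
 *
 *      struct nlm_globals *g;
 *
 *      g = zone_getspecific(nlm_zone_key, curzone);
 */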

/*
 * This is what we pass as the "owner handle" for NLM_LOCK.
 * This lets us find the blocked lock in NLM_GRANTED.
 * It also exposes on the wire what we're using as the
 * sysid for any server, which can be very helpful for
 * problem diagnosis.  (Observability is good.)
 */
struct nlm_owner_handle {
        sysid_t oh_sysid;               /* of remote host */
};
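
/*
 * For example, the client side can build the on-the-wire owner
 * handle from the host's sysid (an illustrative sketch; "args" is
 * a hypothetical nlm4_lockargs being prepared):
 *
 *      struct nlm_owner_handle oh;
 *
 *      oh.oh_sysid = hostp->nh_sysid;
 *      args.alock.oh.n_len = sizeof (oh);
 *      args.alock.oh.n_bytes = (char *)&oh;
 */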

/*
 * Number of times an NLM RPC call is retried in case of failure
 * (used with connectionless transports).
 */
#define NLM_RPC_RETRIES 5
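
/*
 * For example, a caller using a datagram transport might retry a
 * call up to this many times (an illustrative sketch):
 *
 *      for (retries = 0; retries < NLM_RPC_RETRIES; retries++) {
 *              stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);
 *              if (stat == RPC_SUCCESS)
 *                      break;
 *      }
 */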

/*
 * Klmmod global variables
 */
extern krwlock_t lm_lck;
extern zone_key_t nlm_zone_key;

/*
 * NLM interface functions (called directly by
 * either klmmod or klmops)
 */
extern int nlm_frlock(struct vnode *, int, struct flock64 *, int, u_offset_t,
    struct cred *, struct netobj *, struct flk_callback *, int);
extern int nlm_shrlock(struct vnode *, int, struct shrlock *, int,
    struct netobj *, int);
extern int nlm_safemap(const vnode_t *);
extern int nlm_safelock(vnode_t *, const struct flock64 *, cred_t *);
extern int nlm_has_sleep(const vnode_t *);
extern void nlm_register_lock_locally(struct vnode *, struct nlm_host *,
    struct flock64 *, int, u_offset_t);
int nlm_vp_active(const vnode_t *);
void nlm_sysid_free(sysid_t);
void nlm_unexport(struct exportinfo *);

/*
 * NLM startup/shutdown
 */
int nlm_svc_starting(struct nlm_globals *, struct file *,
    const char *, struct knetconfig *);
void nlm_svc_stopping(struct nlm_globals *);
int nlm_svc_add_ep(struct file *, const char *, struct knetconfig *);

/*
 * NLM suspend/resume
 */
void nlm_cprsuspend(void);
void nlm_cprresume(void);

/*
 * NLM internal functions for initialization.
 */
void nlm_init(void);
void nlm_rpc_init(void);
void nlm_rpc_cache_destroy(struct nlm_host *);
void nlm_globals_register(struct nlm_globals *);
void nlm_globals_unregister(struct nlm_globals *);
sysid_t nlm_sysid_alloc(void);

/*
 * Client reclamation/cancelation
 */
void nlm_reclaim_client(struct nlm_globals *, struct nlm_host *);
void nlm_client_cancel_all(struct nlm_globals *, struct nlm_host *);

/* (nlm_rpc_clnt.c) */
enum clnt_stat nlm_null_rpc(CLIENT *, rpcvers_t);
enum clnt_stat nlm_test_rpc(nlm4_testargs *, nlm4_testres *,
    CLIENT *, rpcvers_t);
enum clnt_stat nlm_lock_rpc(nlm4_lockargs *, nlm4_res *,
    CLIENT *, rpcvers_t);
enum clnt_stat nlm_cancel_rpc(nlm4_cancargs *, nlm4_res *,
    CLIENT *, rpcvers_t);
enum clnt_stat nlm_unlock_rpc(nlm4_unlockargs *, nlm4_res *,
    CLIENT *, rpcvers_t);
enum clnt_stat nlm_share_rpc(nlm4_shareargs *, nlm4_shareres *,
    CLIENT *, rpcvers_t);
enum clnt_stat nlm_unshare_rpc(nlm4_shareargs *, nlm4_shareres *,
    CLIENT *, rpcvers_t);

/*
 * RPC service functions.
 * nlm_dispatch.c
 */
void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);

/*
 * Functions for working with knetconfigs (nlm_netconfig.c)
 */
const char *nlm_knc_to_netid(struct knetconfig *);
int nlm_knc_from_netid(const char *, struct knetconfig *);

/*
 * NLM host functions (nlm_impl.c)
 */
struct nlm_host *nlm_host_findcreate(struct nlm_globals *, char *,
    const char *, struct netbuf *);
struct nlm_host *nlm_host_find(struct nlm_globals *,
    const char *, struct netbuf *);
struct nlm_host *nlm_host_find_by_sysid(struct nlm_globals *, sysid_t);
void nlm_host_release(struct nlm_globals *, struct nlm_host *);

void nlm_host_monitor(struct nlm_globals *, struct nlm_host *, int);
void nlm_host_unmonitor(struct nlm_globals *, struct nlm_host *);

void nlm_host_notify_server(struct nlm_host *, int32_t);
void nlm_host_notify_client(struct nlm_host *, int32_t);

int nlm_host_get_state(struct nlm_host *);

struct nlm_vhold *nlm_vhold_get(struct nlm_host *, vnode_t *);
void nlm_vhold_release(struct nlm_host *, struct nlm_vhold *);
struct nlm_vhold *nlm_vhold_find_locked(struct nlm_host *, const vnode_t *);

struct nlm_slock *nlm_slock_register(struct nlm_globals *,
    struct nlm_host *, struct nlm4_lock *, struct vnode *);
void nlm_slock_unregister(struct nlm_globals *, struct nlm_slock *);
int nlm_slock_wait(struct nlm_globals *, struct nlm_slock *, uint_t);
int nlm_slock_grant(struct nlm_globals *,
    struct nlm_host *, struct nlm4_lock *);
void nlm_host_cancel_slocks(struct nlm_globals *, struct nlm_host *);

int nlm_slreq_register(struct nlm_host *,
    struct nlm_vhold *, struct flock64 *);
int nlm_slreq_unregister(struct nlm_host *,
    struct nlm_vhold *, struct flock64 *);

void nlm_shres_track(struct nlm_host *, vnode_t *, struct shrlock *);
void nlm_shres_untrack(struct nlm_host *, vnode_t *, struct shrlock *);
struct nlm_shres *nlm_get_active_shres(struct nlm_host *);
void nlm_free_shrlist(struct nlm_shres *);

int nlm_host_wait_grace(struct nlm_host *);
int nlm_host_cmp(const void *, const void *);
void nlm_copy_netobj(struct netobj *, struct netobj *);

int nlm_host_get_rpc(struct nlm_host *, int, nlm_rpc_t **);
void nlm_host_rele_rpc(struct nlm_host *, nlm_rpc_t *);

/*
 * NLM server functions (nlm_service.c)
 */
void nlm_do_notify1(nlm_sm_status *, void *, struct svc_req *);
void nlm_do_notify2(nlm_sm_status *, void *, struct svc_req *);
void nlm_do_test(nlm4_testargs *, nlm4_testres *,
    struct svc_req *, nlm_testres_cb);
void nlm_do_lock(nlm4_lockargs *, nlm4_res *, struct svc_req *,
    nlm_reply_cb, nlm_res_cb, nlm_testargs_cb);
void nlm_do_cancel(nlm4_cancargs *, nlm4_res *,
    struct svc_req *, nlm_res_cb);
void nlm_do_unlock(nlm4_unlockargs *, nlm4_res *,
    struct svc_req *, nlm_res_cb);
void nlm_do_granted(nlm4_testargs *, nlm4_res *,
    struct svc_req *, nlm_res_cb);
void nlm_do_share(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
void nlm_do_unshare(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
void nlm_do_free_all(nlm4_notify *, void *, struct svc_req *);

/*
 * NLM RPC functions
 */
enum clnt_stat nlm_clnt_call(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
bool_t nlm_caller_is_local(SVCXPRT *);

#endif  /* _NLM_NLM_H_ */