1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 */
26 /*
27 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28 */
29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31 /*
32 * Portions of this source code were derived from Berkeley
33 * 4.3 BSD under license from the Regents of the University of
34 * California.
35 */
36
37 /*
38 * svc.c, Server-side remote procedure call interface.
39 *
40 * There are two sets of procedures here. The xprt routines are
41 * for handling transport handles. The svc routines handle the
42 * list of service routines.
43 *
44 */
45
46 #include "mt.h"
47 #include "rpc_mt.h"
48 #include <assert.h>
49 #include <errno.h>
50 #include <sys/types.h>
51 #include <stropts.h>
52 #include <sys/conf.h>
53 #include <rpc/rpc.h>
54 #ifdef PORTMAP
55 #include <rpc/pmap_clnt.h>
56 #endif
57 #include <sys/poll.h>
58 #include <netconfig.h>
59 #include <syslog.h>
60 #include <stdlib.h>
61 #include <unistd.h>
62 #include <string.h>
63 #include <limits.h>
64
65 extern bool_t __svc_get_door_cred();
66 extern bool_t __rpc_get_local_cred();
67
68 SVCXPRT **svc_xports;
69 static int nsvc_xports; /* total number of svc_xports allocated */
70
71 XDR **svc_xdrs; /* common XDR receive area */
72 int nsvc_xdrs; /* total number of svc_xdrs allocated */
73
74 int __rpc_use_pollfd_done; /* to unlimit the number of connections */
75
76 #define NULL_SVC ((struct svc_callout *)0)
77 #define RQCRED_SIZE 400 /* this size is excessive */
78
79 /*
80 * The services list
81 * Each entry represents a set of procedures (an rpc program).
82 * The dispatch routine takes request structs and runs the
83 * appropriate procedure.
84 */
85 static struct svc_callout {
86 struct svc_callout *sc_next;
87 rpcprog_t sc_prog;
88 rpcvers_t sc_vers;
89 char *sc_netid;
90 void (*sc_dispatch)();
91 } *svc_head;
92 extern rwlock_t svc_lock;
93
94 static struct svc_callout *svc_find();
95 int _svc_prog_dispatch();
96 void svc_getreq_common();
97 char *strdup();
98
99 extern mutex_t svc_door_mutex;
100 extern cond_t svc_door_waitcv;
101 extern int svc_ndoorfds;
102 extern SVCXPRT_LIST *_svc_xprtlist;
103 extern mutex_t xprtlist_lock;
104 extern void __svc_rm_from_xlist();
105
106 extern fd_set _new_svc_fdset;
107
108 /*
 * If the allocated array of reactors is too small, it is grown by this
 * margin.  This reduces the number of reallocations.
111 */
112 #define USER_FD_INCREMENT 5
113
114 static void add_pollfd(int fd, short events);
115 static void remove_pollfd(int fd);
116 static void __svc_remove_input_of_fd(int fd);
117
118 /*
119 * Data used to handle reactor:
120 * - one file descriptor we listen to,
121 * - one callback we call if the fd pops,
122 * - and a cookie passed as a parameter to the callback.
123 *
 * The structure is an array indexed by file descriptor.  Each entry points
 * to the first element of a doubly-linked list of callbacks.
 * Only one callback may be associated with a given (fd, event) pair.
127 */
128
129 struct _svc_user_fd_head;
130
131 typedef struct {
132 struct _svc_user_fd_node *next;
133 struct _svc_user_fd_node *previous;
134 } _svc_user_link;
135
136 typedef struct _svc_user_fd_node {
137 _svc_user_link lnk;
138 svc_input_id_t id;
139 int fd;
140 unsigned int events;
141 svc_callback_t callback;
142 void* cookie;
143 } _svc_user_fd_node;
144
145 typedef struct _svc_user_fd_head {
146 struct _svc_user_fd_node *list;
147 unsigned int mask; /* logical OR of all sub-masks */
148 } _svc_user_fd_head;
149
150
/* Array of defined reactors - indexed by file descriptor */
152 static _svc_user_fd_head *svc_userfds = NULL;
153
/* current size of the svc_userfds array */
155 static int svc_nuserfds = 0;
156
157 /* Mutex to ensure MT safe operations for user fds callbacks. */
158 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
159
160
161 /*
 * This structure is used to provide constant-time algorithms.  There is an
 * array of this structure as large as svc_nuserfds.  When the user registers
 * a new callback, the address of the created structure is stored in a cell
 * of this array.  The index of this cell is the returned unique identifier.
 *
 * On removal, the id is given by the user; the "free" flag then tells
 * whether the cell is filled or not.  If it is free, we return an error.
 * Otherwise, we can free the structure pointed to by fd_node.
 *
 * On insertion, we use the free list threaded through (first_free,
 * next_free).  This way a correct index can be handed to the user in
 * constant time.
174 */
175
176 typedef struct _svc_management_user_fd {
177 bool_t free;
178 union {
179 svc_input_id_t next_free;
180 _svc_user_fd_node *fd_node;
181 } data;
182 } _svc_management_user_fd;
183
184 /* index to the first free elem */
185 static svc_input_id_t first_free = (svc_input_id_t)-1;
/* the array is grown on demand; its current size is svc_nmgtuserfds below */
187 static _svc_management_user_fd* user_fd_mgt_array = NULL;
188
189 /* current size of user_fd_mgt_array */
190 static int svc_nmgtuserfds = 0;
191
192
193 /* Define some macros to access data associated to registration ids. */
194 #define node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
195 #define is_free_id(id) (user_fd_mgt_array[(int)id].free)
196
197 #ifndef POLLSTANDARD
198 #define POLLSTANDARD \
199 (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
200 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
201 #endif
202
203 /*
 * To free an id, we mark the cell as free and insert it at the head of the
 * free list.
206 */
207
208 static void
209 _svc_free_id(const svc_input_id_t id)
210 {
211 assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
212 user_fd_mgt_array[(int)id].free = TRUE;
213 user_fd_mgt_array[(int)id].data.next_free = first_free;
214 first_free = id;
215 }
216
217 /*
 * To get a free cell, we simply take it from the free list and clear its
 * "free" flag.  This function also allocates new memory if necessary.
221 */
222 static svc_input_id_t
223 _svc_attribute_new_id(_svc_user_fd_node *node)
224 {
225 int selected_index = (int)first_free;
226 assert(node != NULL);
227
228 if (selected_index == -1) {
229 /* Allocate new entries */
230 int L_inOldSize = svc_nmgtuserfds;
231 int i;
232 _svc_management_user_fd *tmp;
233
234 svc_nmgtuserfds += USER_FD_INCREMENT;
235
236 tmp = realloc(user_fd_mgt_array,
237 svc_nmgtuserfds * sizeof (_svc_management_user_fd));
238
239 if (tmp == NULL) {
240 syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
241 svc_nmgtuserfds = L_inOldSize;
242 errno = ENOMEM;
243 return ((svc_input_id_t)-1);
244 }
245
246 user_fd_mgt_array = tmp;
247
248 for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
249 _svc_free_id((svc_input_id_t)i);
250 selected_index = (int)first_free;
251 }
252
253 node->id = (svc_input_id_t)selected_index;
254 first_free = user_fd_mgt_array[selected_index].data.next_free;
255
256 user_fd_mgt_array[selected_index].data.fd_node = node;
257 user_fd_mgt_array[selected_index].free = FALSE;
258
259 return ((svc_input_id_t)selected_index);
260 }
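
/*
 * Illustrative walk-through (an assumption added for clarity, not part of
 * the original comments): starting from an empty management array with
 * USER_FD_INCREMENT == 5, the first call to _svc_attribute_new_id() grows
 * the array to 5 cells and threads cells 4, 3, 2, 1, 0 onto the free list,
 * leaving first_free == 0.  That call then hands out id 0 and advances
 * first_free to 1.  A later _svc_free_id(0) marks cell 0 free again and
 * pushes it back onto the head of the free list, so ids are recycled in
 * constant time.
 */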
261
262 /*
 * Process a pollfd entry.  Scan all the associated callbacks whose event
 * mask matches at least one of the received events.
 *
 * If POLLNVAL is received, check whether some callback processes it; if
 * none does, remove the file descriptor from the poll set.  If there is
 * one, let the user do the work.
269 */
270 void
271 __svc_getreq_user(struct pollfd *pfd)
272 {
273 int fd = pfd->fd;
274 short revents = pfd->revents;
275 bool_t invalHandled = FALSE;
276 _svc_user_fd_node *node;
277
278 (void) mutex_lock(&svc_userfds_lock);
279
280 if ((fd < 0) || (fd >= svc_nuserfds)) {
281 (void) mutex_unlock(&svc_userfds_lock);
282 return;
283 }
284
285 node = svc_userfds[fd].list;
286
287 /* check if at least one mask fits */
288 if (0 == (revents & svc_userfds[fd].mask)) {
289 (void) mutex_unlock(&svc_userfds_lock);
290 return;
291 }
292
293 while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
294 /*
295 * If one of the received events maps the ones the node listens
296 * to
297 */
298 _svc_user_fd_node *next = node->lnk.next;
299
300 if (node->callback != NULL) {
301 if (node->events & revents) {
302 if (revents & POLLNVAL) {
303 invalHandled = TRUE;
304 }
305
306 /*
307 * The lock must be released before calling the
308 * user function, as this function can call
309 * svc_remove_input() for example.
310 */
311 (void) mutex_unlock(&svc_userfds_lock);
312 node->callback(node->id, node->fd,
313 node->events & revents, node->cookie);
314 /*
315 * Do not use the node structure anymore, as it
316 * could have been deallocated by the previous
317 * callback.
318 */
319 (void) mutex_lock(&svc_userfds_lock);
320 }
321 }
322 node = next;
323 }
324
325 if ((revents & POLLNVAL) && !invalHandled)
326 __svc_remove_input_of_fd(fd);
327 (void) mutex_unlock(&svc_userfds_lock);
328 }
329
330
331 /*
332 * Check if a file descriptor is associated with a user reactor.
 * To do this, just check that the entry indexed by fd has a non-zero event
 * mask (i.e. at least one callback is registered).
335 */
336 bool_t
337 __is_a_userfd(int fd)
338 {
339 /* Checks argument */
340 if ((fd < 0) || (fd >= svc_nuserfds))
341 return (FALSE);
342 return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
343 }
344
/* Free everything concerning user fds. */
/* Used in svc_run.c, so not declared static. */
347
348 void
349 __destroy_userfd(void)
350 {
351 int one_fd;
352 /* Clean user fd */
353 if (svc_userfds != NULL) {
354 for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
355 _svc_user_fd_node *node;
356
357 node = svc_userfds[one_fd].list;
358 while (node != NULL) {
359 _svc_user_fd_node *tmp = node;
360 _svc_free_id(node->id);
361 node = node->lnk.next;
362 free(tmp);
363 }
364 }
365
366 free(user_fd_mgt_array);
367 user_fd_mgt_array = NULL;
368 first_free = (svc_input_id_t)-1;
369
370 free(svc_userfds);
371 svc_userfds = NULL;
372 svc_nuserfds = 0;
373 }
374 }
375
376 /*
 * Remove all the callbacks associated with an fd; useful when the fd is
 * closed, for instance.
379 */
380 static void
381 __svc_remove_input_of_fd(int fd)
382 {
383 _svc_user_fd_node **pnode;
384 _svc_user_fd_node *tmp;
385
386 if ((fd < 0) || (fd >= svc_nuserfds))
387 return;
388
389 pnode = &svc_userfds[fd].list;
390 while ((tmp = *pnode) != NULL) {
391 *pnode = tmp->lnk.next;
392
393 _svc_free_id(tmp->id);
394 free(tmp);
395 }
396
397 svc_userfds[fd].mask = 0;
398 }
399
400 /*
 * Allow the user to add an fd to the poll list.  On failure return
 * (svc_input_id_t)-1; otherwise return the registration id.
403 */
404
405 svc_input_id_t
406 svc_add_input(int user_fd, unsigned int events,
407 svc_callback_t user_callback, void *cookie)
408 {
409 _svc_user_fd_node *new_node;
410
411 if (user_fd < 0) {
412 errno = EINVAL;
413 return ((svc_input_id_t)-1);
414 }
415
416 if ((events == 0x0000) ||
417 (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
418 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
419 errno = EINVAL;
420 return ((svc_input_id_t)-1);
421 }
422
423 (void) mutex_lock(&svc_userfds_lock);
424
425 if ((user_fd < svc_nuserfds) &&
426 (svc_userfds[user_fd].mask & events) != 0) {
		/* Callback already registered */
428 errno = EEXIST;
429 (void) mutex_unlock(&svc_userfds_lock);
430 return ((svc_input_id_t)-1);
431 }
432
433 /* Handle memory allocation. */
434 if (user_fd >= svc_nuserfds) {
435 int oldSize = svc_nuserfds;
436 int i;
437 _svc_user_fd_head *tmp;
438
439 svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
440
441 tmp = realloc(svc_userfds,
442 svc_nuserfds * sizeof (_svc_user_fd_head));
443
444 if (tmp == NULL) {
445 syslog(LOG_ERR, "svc_add_input: out of memory");
446 svc_nuserfds = oldSize;
447 errno = ENOMEM;
448 (void) mutex_unlock(&svc_userfds_lock);
449 return ((svc_input_id_t)-1);
450 }
451
452 svc_userfds = tmp;
453
454 for (i = oldSize; i < svc_nuserfds; i++) {
455 svc_userfds[i].list = NULL;
456 svc_userfds[i].mask = 0;
457 }
458 }
459
460 new_node = malloc(sizeof (_svc_user_fd_node));
461 if (new_node == NULL) {
462 syslog(LOG_ERR, "svc_add_input: out of memory");
463 errno = ENOMEM;
464 (void) mutex_unlock(&svc_userfds_lock);
465 return ((svc_input_id_t)-1);
466 }
467
468 /* create a new node */
469 new_node->fd = user_fd;
470 new_node->events = events;
471 new_node->callback = user_callback;
472 new_node->cookie = cookie;
473
474 if (_svc_attribute_new_id(new_node) == -1) {
475 (void) mutex_unlock(&svc_userfds_lock);
476 free(new_node);
477 return ((svc_input_id_t)-1);
478 }
479
480 /* Add the new element at the beginning of the list. */
481 if (svc_userfds[user_fd].list != NULL)
482 svc_userfds[user_fd].list->lnk.previous = new_node;
483 new_node->lnk.next = svc_userfds[user_fd].list;
484 new_node->lnk.previous = NULL;
485
486 svc_userfds[user_fd].list = new_node;
487
	/* refresh global mask for this file descriptor */
489 svc_userfds[user_fd].mask |= events;
490
491 /* refresh mask for the poll */
492 add_pollfd(user_fd, (svc_userfds[user_fd].mask));
493
494 (void) mutex_unlock(&svc_userfds_lock);
495 return (new_node->id);
496 }
497
498 int
499 svc_remove_input(svc_input_id_t id)
500 {
501 _svc_user_fd_node* node;
502 _svc_user_fd_node* next;
503 _svc_user_fd_node* previous;
504 int fd; /* caching optim */
505
506 (void) mutex_lock(&svc_userfds_lock);
507
508 /* Immediately update data for id management */
509 if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
510 is_free_id(id)) {
511 errno = EINVAL;
512 (void) mutex_unlock(&svc_userfds_lock);
513 return (-1);
514 }
515
516 node = node_from_id(id);
517 assert(node != NULL);
518
519 _svc_free_id(id);
520 next = node->lnk.next;
521 previous = node->lnk.previous;
522 fd = node->fd; /* caching optim */
523
524 /* Remove this node from the list. */
525 if (previous != NULL) {
526 previous->lnk.next = next;
527 } else {
528 assert(svc_userfds[fd].list == node);
529 svc_userfds[fd].list = next;
530 }
531 if (next != NULL)
532 next->lnk.previous = previous;
533
534 /* Remove the node flags from the global mask */
535 svc_userfds[fd].mask ^= node->events;
536
537 free(node);
538 if (svc_userfds[fd].mask == 0) {
539 assert(svc_userfds[fd].list == NULL);
540 remove_pollfd(fd);
541 } else {
542 assert(svc_userfds[fd].list != NULL);
543 }
	/* XXX: a cleanup pass here could shrink the memory usage */
545
546 (void) mutex_unlock(&svc_userfds_lock);
547 return (0);
548 }
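
/*
 * Illustrative usage sketch (an assumption, not part of the library): a
 * server that wants the svc poll loop to watch a private descriptor can
 * register a callback with svc_add_input() and tear it down again with
 * svc_remove_input().  The callback, descriptor and cookie names below
 * are hypothetical.
 *
 *	static void
 *	my_input_handler(svc_input_id_t id, int fd, unsigned int events,
 *	    void *cookie)
 *	{
 *		if (events & (POLLERR | POLLHUP)) {
 *			(void) svc_remove_input(id);
 *			return;
 *		}
 *		... read from fd, using cookie for per-registration state ...
 *	}
 *
 *	svc_input_id_t id = svc_add_input(my_fd,
 *	    POLLIN | POLLERR | POLLHUP, my_input_handler, my_cookie);
 *	if ((int)id == -1)
 *		syslog(LOG_ERR, "svc_add_input failed");
 */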
549
550 /*
551 * Provides default service-side functions for authentication flavors
552 * that do not use all the fields in struct svc_auth_ops.
553 */
554
555 /*ARGSUSED*/
556 static int
557 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
558 {
559 return (*xfunc)(xdrs, xwhere);
560 }
561
562 struct svc_auth_ops svc_auth_any_ops = {
563 authany_wrap,
564 authany_wrap,
565 };
566
567 /*
568 * Return pointer to server authentication structure.
569 */
570 SVCAUTH *
571 __svc_get_svcauth(SVCXPRT *xprt)
572 {
573 /* LINTED pointer alignment */
574 return (&SVC_XP_AUTH(xprt));
575 }
576
577 /*
578 * A callback routine to cleanup after a procedure is executed.
579 */
580 void (*__proc_cleanup_cb)() = NULL;
581
582 void *
583 __svc_set_proc_cleanup_cb(void *cb)
584 {
585 void *tmp = (void *)__proc_cleanup_cb;
586
587 __proc_cleanup_cb = (void (*)())cb;
588 return (tmp);
589 }
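
/*
 * Illustrative sketch (an assumption, not part of the library): the hook
 * installed here is invoked with the transport handle after each
 * dispatched procedure, so a caller can release per-request state.  The
 * hook name below is hypothetical.
 *
 *	static void
 *	my_cleanup(SVCXPRT *xprt)
 *	{
 *		... release per-request resources tied to xprt ...
 *	}
 *
 *	void *previous = __svc_set_proc_cleanup_cb((void *)my_cleanup);
 */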
590
591 /* *************** SVCXPRT related stuff **************** */
592
593
594 static int pollfd_shrinking = 1;
595
596
597 /*
598 * Add fd to svc_pollfd
599 */
600 static void
601 add_pollfd(int fd, short events)
602 {
603 if (fd < FD_SETSIZE) {
604 FD_SET(fd, &svc_fdset);
605 #if !defined(_LP64)
606 FD_SET(fd, &_new_svc_fdset);
607 #endif
608 svc_nfds++;
609 svc_nfds_set++;
610 if (fd >= svc_max_fd)
611 svc_max_fd = fd + 1;
612 }
613 if (fd >= svc_max_pollfd)
614 svc_max_pollfd = fd + 1;
615 if (svc_max_pollfd > svc_pollfd_allocd) {
616 int i = svc_pollfd_allocd;
617 pollfd_t *tmp;
618 do {
619 svc_pollfd_allocd += POLLFD_EXTEND;
620 } while (svc_max_pollfd > svc_pollfd_allocd);
621 tmp = realloc(svc_pollfd,
622 sizeof (pollfd_t) * svc_pollfd_allocd);
623 if (tmp != NULL) {
624 svc_pollfd = tmp;
625 for (; i < svc_pollfd_allocd; i++)
626 POLLFD_CLR(i, tmp);
627 } else {
628 /*
			 * Give an error message; undo the fdset setting
			 * above; reset the pollfd_shrinking flag.
			 * Because of this, poll will not be done
			 * on these fds.
633 */
634 if (fd < FD_SETSIZE) {
635 FD_CLR(fd, &svc_fdset);
636 #if !defined(_LP64)
637 FD_CLR(fd, &_new_svc_fdset);
638 #endif
639 svc_nfds--;
640 svc_nfds_set--;
641 if (fd == (svc_max_fd - 1))
642 svc_max_fd--;
643 }
644 if (fd == (svc_max_pollfd - 1))
645 svc_max_pollfd--;
646 pollfd_shrinking = 0;
647 syslog(LOG_ERR, "add_pollfd: out of memory");
648 _exit(1);
649 }
650 }
651 svc_pollfd[fd].fd = fd;
652 svc_pollfd[fd].events = events;
653 svc_npollfds++;
654 svc_npollfds_set++;
655 }
656
657 /*
 * The fd is still active but only the bit in fdset is cleared.
 * Do not subtract svc_nfds or svc_npollfds.
660 */
661 void
662 clear_pollfd(int fd)
663 {
664 if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
665 FD_CLR(fd, &svc_fdset);
666 #if !defined(_LP64)
667 FD_CLR(fd, &_new_svc_fdset);
668 #endif
669 svc_nfds_set--;
670 }
671 if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
672 POLLFD_CLR(fd, svc_pollfd);
673 svc_npollfds_set--;
674 }
675 }
676
677 /*
 * Set the bit in fdset for an active fd so that poll() is done for it.
679 */
680 void
681 set_pollfd(int fd, short events)
682 {
683 if (fd < FD_SETSIZE) {
684 FD_SET(fd, &svc_fdset);
685 #if !defined(_LP64)
686 FD_SET(fd, &_new_svc_fdset);
687 #endif
688 svc_nfds_set++;
689 }
690 if (fd < svc_pollfd_allocd) {
691 svc_pollfd[fd].fd = fd;
692 svc_pollfd[fd].events = events;
693 svc_npollfds_set++;
694 }
695 }
696
697 /*
 * Remove a svc_pollfd entry; this does not shrink the allocated memory.
699 */
700 static void
701 remove_pollfd(int fd)
702 {
703 clear_pollfd(fd);
704 if (fd == (svc_max_fd - 1))
705 svc_max_fd--;
706 svc_nfds--;
707 if (fd == (svc_max_pollfd - 1))
708 svc_max_pollfd--;
709 svc_npollfds--;
710 }
711
712 /*
 * Delete a svc_pollfd entry; this also shrinks the allocated memory.
 * Use remove_pollfd() if you do not want to shrink.
715 */
716 static void
717 delete_pollfd(int fd)
718 {
719 remove_pollfd(fd);
720 if (pollfd_shrinking && svc_max_pollfd <
721 (svc_pollfd_allocd - POLLFD_SHRINK)) {
722 do {
723 svc_pollfd_allocd -= POLLFD_SHRINK;
724 } while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
725 svc_pollfd = realloc(svc_pollfd,
726 sizeof (pollfd_t) * svc_pollfd_allocd);
727 if (svc_pollfd == NULL) {
728 syslog(LOG_ERR, "delete_pollfd: out of memory");
729 _exit(1);
730 }
731 }
732 }
733
734
735 /*
736 * Activate a transport handle.
737 */
738 void
739 xprt_register(const SVCXPRT *xprt)
740 {
741 int fd = xprt->xp_fd;
742 #ifdef CALLBACK
743 extern void (*_svc_getreqset_proc)();
744 #endif
745 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
746
747 (void) rw_wrlock(&svc_fd_lock);
748 if (svc_xports == NULL) {
749 /* allocate some small amount first */
750 svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
751 if (svc_xports == NULL) {
752 syslog(LOG_ERR, "xprt_register: out of memory");
753 _exit(1);
754 }
755 nsvc_xports = FD_INCREMENT;
756
757 #ifdef CALLBACK
758 /*
759 * XXX: This code does not keep track of the server state.
760 *
		 * This provides for callback support.  When a client
		 * receives a call from another client on the server fds,
		 * it calls _svc_getreqset_proc(), which returns
		 * after serving all the server requests.  Also look under
		 * clnt_dg.c and clnt_vc.c (the clnt_call part of it).
766 */
767 _svc_getreqset_proc = svc_getreq_poll;
768 #endif
769 }
770
771 while (fd >= nsvc_xports) {
772 SVCXPRT **tmp_xprts = svc_xports;
773
774 /* time to expand svc_xprts */
775 tmp_xprts = realloc(svc_xports,
776 sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
777 if (tmp_xprts == NULL) {
778 syslog(LOG_ERR, "xprt_register : out of memory.");
779 _exit(1);
780 }
781
782 svc_xports = tmp_xprts;
783 (void) memset(&svc_xports[nsvc_xports], 0,
784 sizeof (SVCXPRT *) * FD_INCREMENT);
785 nsvc_xports += FD_INCREMENT;
786 }
787
788 svc_xports[fd] = (SVCXPRT *)xprt;
789
790 add_pollfd(fd, MASKVAL);
791
792 if (svc_polling) {
793 char dummy;
794
795 /*
796 * This happens only in one of the MT modes.
797 * Wake up poller.
798 */
799 (void) write(svc_pipe[1], &dummy, sizeof (dummy));
800 }
801 /*
802 * If already dispatching door based services, start
803 * dispatching TLI based services now.
804 */
805 (void) mutex_lock(&svc_door_mutex);
806 if (svc_ndoorfds > 0)
807 (void) cond_signal(&svc_door_waitcv);
808 (void) mutex_unlock(&svc_door_mutex);
809
810 if (svc_xdrs == NULL) {
811 /* allocate initial chunk */
812 svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
813 if (svc_xdrs != NULL)
814 nsvc_xdrs = FD_INCREMENT;
815 else {
816 syslog(LOG_ERR, "xprt_register : out of memory.");
817 _exit(1);
818 }
819 }
820 (void) rw_unlock(&svc_fd_lock);
821 }
822
823 /*
824 * De-activate a transport handle.
825 */
826 void
827 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
828 {
829 int fd = xprt->xp_fd;
830
831 if (lock_not_held)
832 (void) rw_wrlock(&svc_fd_lock);
833 if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
834 svc_xports[fd] = NULL;
835 delete_pollfd(fd);
836 }
837 if (lock_not_held)
838 (void) rw_unlock(&svc_fd_lock);
839 __svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
840 }
841
842 void
843 xprt_unregister(const SVCXPRT *xprt)
844 {
845 __xprt_unregister_private(xprt, TRUE);
846 }
847
848 /* ********************** CALLOUT list related stuff ************* */
849
850 /*
851 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
853 * program number comes in.
854 */
855 bool_t
856 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
857 void (*dispatch)(), const struct netconfig *nconf)
858 {
859 struct svc_callout *prev;
860 struct svc_callout *s, **s2;
861 struct netconfig *tnconf;
862 char *netid = NULL;
863 int flag = 0;
864
865 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
866
867 if (xprt->xp_netid) {
868 netid = strdup(xprt->xp_netid);
869 flag = 1;
870 } else if (nconf && nconf->nc_netid) {
871 netid = strdup(nconf->nc_netid);
872 flag = 1;
873 } else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
874 != NULL) {
875 netid = strdup(tnconf->nc_netid);
876 flag = 1;
877 freenetconfigent(tnconf);
878 } /* must have been created with svc_raw_create */
879 if ((netid == NULL) && (flag == 1))
880 return (FALSE);
881
882 (void) rw_wrlock(&svc_lock);
883 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
884 if (netid)
885 free(netid);
886 if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* caller is registering another xprt */
888 (void) rw_unlock(&svc_lock);
889 return (FALSE);
890 }
891 s = malloc(sizeof (struct svc_callout));
892 if (s == NULL) {
893 if (netid)
894 free(netid);
895 (void) rw_unlock(&svc_lock);
896 return (FALSE);
897 }
898
899 s->sc_prog = prog;
900 s->sc_vers = vers;
901 s->sc_dispatch = dispatch;
902 s->sc_netid = netid;
903 s->sc_next = NULL;
904
905 /*
906 * The ordering of transports is such that the most frequently used
907 * one appears first. So add the new entry to the end of the list.
908 */
909 for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
910 ;
911 *s2 = s;
912
913 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
914 if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
915 syslog(LOG_ERR, "svc_reg : strdup failed.");
916 free(netid);
917 free(s);
918 *s2 = NULL;
919 (void) rw_unlock(&svc_lock);
920 return (FALSE);
921 }
922
923 rpcb_it:
924 (void) rw_unlock(&svc_lock);
925
926 /* now register the information with the local binder service */
927 if (nconf)
928 return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
929 return (TRUE);
930 /*NOTREACHED*/
931 }
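
/*
 * Illustrative usage sketch (an assumption, not part of the library): a
 * typical server creates a transport handle and then registers its
 * dispatch routine with svc_reg().  The program number, version and
 * dispatch function below are hypothetical.
 *
 *	#define	MYPROG	((rpcprog_t)0x20000099)
 *	#define	MYVERS	((rpcvers_t)1)
 *
 *	static void my_dispatch(struct svc_req *rqstp, SVCXPRT *transp);
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = (nconf != NULL) ?
 *	    svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0) : NULL;
 *	if (xprt == NULL || !svc_reg(xprt, MYPROG, MYVERS, my_dispatch, nconf))
 *		syslog(LOG_ERR, "unable to register (MYPROG, MYVERS)");
 *	else
 *		svc_run();
 *	if (nconf != NULL)
 *		freenetconfigent(nconf);
 */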
932
933 /*
934 * Remove a service program from the callout list.
935 */
936 void
937 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
938 {
939 struct svc_callout *prev;
940 struct svc_callout *s;
941
942 /* unregister the information anyway */
943 (void) rpcb_unset(prog, vers, NULL);
944
945 (void) rw_wrlock(&svc_lock);
946 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
947 if (prev == NULL_SVC) {
948 svc_head = s->sc_next;
949 } else {
950 prev->sc_next = s->sc_next;
951 }
952 s->sc_next = NULL_SVC;
953 if (s->sc_netid)
954 free(s->sc_netid);
955 free(s);
956 }
957 (void) rw_unlock(&svc_lock);
958 }
959
960 #ifdef PORTMAP
961 /*
962 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
964 * program number comes in.
965 * For version 2 portmappers.
966 */
967 bool_t
968 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
969 void (*dispatch)(), int protocol)
970 {
971 struct svc_callout *prev;
972 struct svc_callout *s;
973 struct netconfig *nconf;
974 char *netid = NULL;
975 int flag = 0;
976
977 if (xprt->xp_netid) {
978 netid = strdup(xprt->xp_netid);
979 flag = 1;
980 } else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
981 __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
982 /* fill in missing netid field in SVCXPRT */
983 netid = strdup(nconf->nc_netid);
984 flag = 1;
985 freenetconfigent(nconf);
986 } /* must be svc_raw_create */
987
988 if ((netid == NULL) && (flag == 1))
989 return (FALSE);
990
991 (void) rw_wrlock(&svc_lock);
992 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
993 if (netid)
994 free(netid);
995 if (s->sc_dispatch == dispatch)
			goto pmap_it; /* caller is registering another xprt */
997 (void) rw_unlock(&svc_lock);
998 return (FALSE);
999 }
1000 s = malloc(sizeof (struct svc_callout));
1001 if (s == (struct svc_callout *)0) {
1002 if (netid)
1003 free(netid);
1004 (void) rw_unlock(&svc_lock);
1005 return (FALSE);
1006 }
1007 s->sc_prog = prog;
1008 s->sc_vers = vers;
1009 s->sc_dispatch = dispatch;
1010 s->sc_netid = netid;
1011 s->sc_next = svc_head;
1012 svc_head = s;
1013
1014 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1015 if ((xprt->xp_netid = strdup(netid)) == NULL) {
1016 syslog(LOG_ERR, "svc_register : strdup failed.");
1017 free(netid);
1018 svc_head = s->sc_next;
1019 free(s);
1020 (void) rw_unlock(&svc_lock);
1021 return (FALSE);
1022 }
1023
1024 pmap_it:
1025 (void) rw_unlock(&svc_lock);
1026 /* now register the information with the local binder service */
1027 if (protocol)
1028 return (pmap_set(prog, vers, protocol, xprt->xp_port));
1029 return (TRUE);
1030 }
1031
1032 /*
1033 * Remove a service program from the callout list.
1034 * For version 2 portmappers.
1035 */
1036 void
1037 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1038 {
1039 struct svc_callout *prev;
1040 struct svc_callout *s;
1041
1042 (void) rw_wrlock(&svc_lock);
1043 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1044 if (prev == NULL_SVC) {
1045 svc_head = s->sc_next;
1046 } else {
1047 prev->sc_next = s->sc_next;
1048 }
1049 s->sc_next = NULL_SVC;
1050 if (s->sc_netid)
1051 free(s->sc_netid);
1052 free(s);
1053 /* unregister the information with the local binder service */
1054 (void) pmap_unset(prog, vers);
1055 }
1056 (void) rw_unlock(&svc_lock);
1057 }
1058 #endif /* PORTMAP */
1059
1060 /*
 * Search the callout list for a program number and return the callout
 * struct.
 * The transport is checked as well.  Many routines such as svc_unreg
 * don't give any corresponding transport, so don't check the transport
 * if netid == NULL.
1066 */
1067 static struct svc_callout *
1068 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1069 {
1070 struct svc_callout *s, *p;
1071
1072 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1073
1074 /* assert(RW_WRITE_HELD(&svc_lock)); */
1075 p = NULL_SVC;
1076 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1077 if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1078 ((netid == NULL) || (s->sc_netid == NULL) ||
1079 (strcmp(netid, s->sc_netid) == 0)))
1080 break;
1081 p = s;
1082 }
1083 *prev = p;
1084 return (s);
1085 }
1086
1087
1088 /* ******************* REPLY GENERATION ROUTINES ************ */
1089
1090 /*
1091 * Send a reply to an rpc request
1092 */
1093 bool_t
1094 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1095 const caddr_t xdr_location)
1096 {
1097 struct rpc_msg rply;
1098
1099 rply.rm_direction = REPLY;
1100 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1101 rply.acpted_rply.ar_verf = xprt->xp_verf;
1102 rply.acpted_rply.ar_stat = SUCCESS;
1103 rply.acpted_rply.ar_results.where = xdr_location;
1104 rply.acpted_rply.ar_results.proc = xdr_results;
1105 return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1106 }
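
/*
 * Illustrative sketch (an assumption, not part of the library): a dispatch
 * routine typically decodes its arguments with svc_getargs(), runs the
 * procedure, and answers with svc_sendreply() or one of the svcerr_*()
 * routines below.  The procedure number and argument/result types are
 * hypothetical.
 *
 *	static void
 *	my_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
 *	{
 *		int arg, res;
 *
 *		switch (rqstp->rq_proc) {
 *		case NULLPROC:
 *			(void) svc_sendreply(transp, (xdrproc_t)xdr_void, NULL);
 *			return;
 *		case 1:
 *			if (!svc_getargs(transp, (xdrproc_t)xdr_int,
 *			    (caddr_t)&arg)) {
 *				svcerr_decode(transp);
 *				return;
 *			}
 *			res = arg + 1;
 *			if (!svc_sendreply(transp, (xdrproc_t)xdr_int,
 *			    (caddr_t)&res))
 *				svcerr_systemerr(transp);
 *			(void) svc_freeargs(transp, (xdrproc_t)xdr_int,
 *			    (caddr_t)&arg);
 *			return;
 *		default:
 *			svcerr_noproc(transp);
 *		}
 *	}
 */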
1107
1108 /*
1109 * No procedure error reply
1110 */
1111 void
1112 svcerr_noproc(const SVCXPRT *xprt)
1113 {
1114 struct rpc_msg rply;
1115
1116 rply.rm_direction = REPLY;
1117 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1118 rply.acpted_rply.ar_verf = xprt->xp_verf;
1119 rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1120 SVC_REPLY((SVCXPRT *)xprt, &rply);
1121 }
1122
1123 /*
1124 * Can't decode args error reply
1125 */
1126 void
1127 svcerr_decode(const SVCXPRT *xprt)
1128 {
1129 struct rpc_msg rply;
1130
1131 rply.rm_direction = REPLY;
1132 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1133 rply.acpted_rply.ar_verf = xprt->xp_verf;
1134 rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1135 SVC_REPLY((SVCXPRT *)xprt, &rply);
1136 }
1137
1138 /*
1139 * Some system error
1140 */
1141 void
1142 svcerr_systemerr(const SVCXPRT *xprt)
1143 {
1144 struct rpc_msg rply;
1145
1146 rply.rm_direction = REPLY;
1147 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1148 rply.acpted_rply.ar_verf = xprt->xp_verf;
1149 rply.acpted_rply.ar_stat = SYSTEM_ERR;
1150 SVC_REPLY((SVCXPRT *)xprt, &rply);
1151 }
1152
1153 /*
1154 * Tell RPC package to not complain about version errors to the client. This
1155 * is useful when revving broadcast protocols that sit on a fixed address.
1156 * There is really one (or should be only one) example of this kind of
1157 * protocol: the portmapper (or rpc binder).
1158 */
1159 void
1160 __svc_versquiet_on(const SVCXPRT *xprt)
1161 {
1162 /* LINTED pointer alignment */
1163 svc_flags(xprt) |= SVC_VERSQUIET;
1164 }
1165
1166 void
1167 __svc_versquiet_off(const SVCXPRT *xprt)
1168 {
1169 /* LINTED pointer alignment */
1170 svc_flags(xprt) &= ~SVC_VERSQUIET;
1171 }
1172
1173 void
1174 svc_versquiet(const SVCXPRT *xprt)
1175 {
1176 __svc_versquiet_on(xprt);
1177 }
1178
1179 int
1180 __svc_versquiet_get(const SVCXPRT *xprt)
1181 {
1182 /* LINTED pointer alignment */
1183 return (svc_flags(xprt) & SVC_VERSQUIET);
1184 }
1185
1186 /*
1187 * Authentication error reply
1188 */
1189 void
1190 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1191 {
1192 struct rpc_msg rply;
1193
1194 rply.rm_direction = REPLY;
1195 rply.rm_reply.rp_stat = MSG_DENIED;
1196 rply.rjcted_rply.rj_stat = AUTH_ERROR;
1197 rply.rjcted_rply.rj_why = why;
1198 SVC_REPLY((SVCXPRT *)xprt, &rply);
1199 }
1200
1201 /*
1202 * Auth too weak error reply
1203 */
1204 void
1205 svcerr_weakauth(const SVCXPRT *xprt)
1206 {
1207 svcerr_auth(xprt, AUTH_TOOWEAK);
1208 }
1209
1210 /*
1211 * Program unavailable error reply
1212 */
1213 void
1214 svcerr_noprog(const SVCXPRT *xprt)
1215 {
1216 struct rpc_msg rply;
1217
1218 rply.rm_direction = REPLY;
1219 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1220 rply.acpted_rply.ar_verf = xprt->xp_verf;
1221 rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1222 SVC_REPLY((SVCXPRT *)xprt, &rply);
1223 }
1224
1225 /*
1226 * Program version mismatch error reply
1227 */
1228 void
1229 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1230 const rpcvers_t high_vers)
1231 {
1232 struct rpc_msg rply;
1233
1234 rply.rm_direction = REPLY;
1235 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1236 rply.acpted_rply.ar_verf = xprt->xp_verf;
1237 rply.acpted_rply.ar_stat = PROG_MISMATCH;
1238 rply.acpted_rply.ar_vers.low = low_vers;
1239 rply.acpted_rply.ar_vers.high = high_vers;
1240 SVC_REPLY((SVCXPRT *)xprt, &rply);
1241 }
1242
1243 /* ******************* SERVER INPUT STUFF ******************* */
1244
1245 /*
1246 * Get server side input from some transport.
1247 *
1248 * Statement of authentication parameters management:
1249 * This function owns and manages all authentication parameters, specifically
1250 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1251 * the "cooked" credentials (rqst->rq_clntcred).
1252 * However, this function does not know the structure of the cooked
 * credentials, so it makes the following assumptions:
1254 * a) the structure is contiguous (no pointers), and
1255 * b) the cred structure size does not exceed RQCRED_SIZE bytes.
1256 * In all events, all three parameters are freed upon exit from this routine.
 * The storage is trivially managed on the call stack in user land, but
 * is allocated via malloc() in kernel land.
1259 */
1260
1261 void
1262 svc_getreq(int rdfds)
1263 {
1264 fd_set readfds;
1265
1266 FD_ZERO(&readfds);
1267 readfds.fds_bits[0] = rdfds;
1268 svc_getreqset(&readfds);
1269 }
1270
1271 void
1272 svc_getreqset(fd_set *readfds)
1273 {
1274 int i;
1275
1276 for (i = 0; i < svc_max_fd; i++) {
1277 /* fd has input waiting */
1278 if (FD_ISSET(i, readfds))
1279 svc_getreq_common(i);
1280 }
1281 }
1282
1283 void
1284 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1285 {
1286 int i;
1287 int fds_found;
1288
1289 for (i = fds_found = 0; fds_found < pollretval; i++) {
1290 struct pollfd *p = &pfdp[i];
1291
1292 if (p->revents) {
1293 /* fd has input waiting */
1294 fds_found++;
1295 /*
1296 * We assume that this function is only called
1297 * via someone select()ing from svc_fdset or
1298 * poll()ing from svc_pollset[]. Thus it's safe
1299 * to handle the POLLNVAL event by simply turning
1300 * the corresponding bit off in svc_fdset. The
1301 * svc_pollset[] array is derived from svc_fdset
1302 * and so will also be updated eventually.
1303 *
1304 * XXX Should we do an xprt_unregister() instead?
1305 */
1306 /* Handle user callback */
1307 if (__is_a_userfd(p->fd) == TRUE) {
1308 (void) rw_rdlock(&svc_fd_lock);
1309 __svc_getreq_user(p);
1310 (void) rw_unlock(&svc_fd_lock);
1311 } else {
1312 if (p->revents & POLLNVAL) {
1313 (void) rw_wrlock(&svc_fd_lock);
1314 remove_pollfd(p->fd); /* XXX */
1315 (void) rw_unlock(&svc_fd_lock);
1316 } else {
1317 svc_getreq_common(p->fd);
1318 }
1319 }
1320 }
1321 }
1322 }
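
/*
 * Illustrative sketch (an assumption, not part of the library): a server
 * that runs its own event loop instead of calling svc_run() can poll the
 * globally maintained svc_pollfd array and hand the ready descriptors to
 * svc_getreq_poll().
 *
 *	for (;;) {
 *		int nfds = poll(svc_pollfd, svc_max_pollfd, -1);
 *
 *		if (nfds == -1) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		if (nfds > 0)
 *			svc_getreq_poll(svc_pollfd, nfds);
 *	}
 */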
1323
1324 void
1325 svc_getreq_common(const int fd)
1326 {
1327 SVCXPRT *xprt;
1328 enum xprt_stat stat;
1329 struct rpc_msg *msg;
1330 struct svc_req *r;
1331 char *cred_area;
1332
1333 (void) rw_rdlock(&svc_fd_lock);
1334
1335 /* HANDLE USER CALLBACK */
1336 if (__is_a_userfd(fd) == TRUE) {
1337 struct pollfd virtual_fd;
1338
1339 virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1340 virtual_fd.fd = fd;
1341 __svc_getreq_user(&virtual_fd);
1342 (void) rw_unlock(&svc_fd_lock);
1343 return;
1344 }
1345
1346 /*
1347 * The transport associated with this fd could have been
	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
	 * This can happen if two or more fds get read events and are
	 * passed to svc_getreq_poll/set; the first fd is serviced by
	 * the dispatch routine, which cleans up any dead transports. If
1352 * one of the dead transports removed is the other fd that
1353 * had a read event then svc_getreq_common() will be called with no
1354 * xprt associated with the fd that had the original read event.
1355 */
1356 if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1357 (void) rw_unlock(&svc_fd_lock);
1358 return;
1359 }
1360 (void) rw_unlock(&svc_fd_lock);
1361 /* LINTED pointer alignment */
1362 msg = SVCEXT(xprt)->msg;
1363 /* LINTED pointer alignment */
1364 r = SVCEXT(xprt)->req;
1365 /* LINTED pointer alignment */
1366 cred_area = SVCEXT(xprt)->cred_area;
1367 msg->rm_call.cb_cred.oa_base = cred_area;
1368 msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1369 r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1370
	/* receive msgs from the xprt (supports batch calls) */
1372 do {
1373 bool_t dispatch;
1374
1375 if (dispatch = SVC_RECV(xprt, msg))
1376 (void) _svc_prog_dispatch(xprt, msg, r);
1377 /*
1378 * Check if the xprt has been disconnected in a recursive call
1379 * in the service dispatch routine. If so, then break
1380 */
1381 (void) rw_rdlock(&svc_fd_lock);
1382 if (xprt != svc_xports[fd]) {
1383 (void) rw_unlock(&svc_fd_lock);
1384 break;
1385 }
1386 (void) rw_unlock(&svc_fd_lock);
1387
1388 /*
1389 * Call cleanup procedure if set.
1390 */
1391 if (__proc_cleanup_cb != NULL && dispatch)
1392 (*__proc_cleanup_cb)(xprt);
1393
1394 if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1395 SVC_DESTROY(xprt);
1396 break;
1397 }
1398 } while (stat == XPRT_MOREREQS);
1399 }
1400
1401 int
1402 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1403 {
1404 struct svc_callout *s;
1405 enum auth_stat why;
1406 int prog_found;
1407 rpcvers_t low_vers;
1408 rpcvers_t high_vers;
1409 void (*disp_fn)();
1410
1411 r->rq_xprt = xprt;
1412 r->rq_prog = msg->rm_call.cb_prog;
1413 r->rq_vers = msg->rm_call.cb_vers;
1414 r->rq_proc = msg->rm_call.cb_proc;
1415 r->rq_cred = msg->rm_call.cb_cred;
1416 /* LINTED pointer alignment */
1417 SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1418 /* LINTED pointer alignment */
1419 SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1420
1421 /* first authenticate the message */
1422 /* Check for null flavor and bypass these calls if possible */
1423
1424 if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1425 r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1426 r->rq_xprt->xp_verf.oa_length = 0;
1427 } else {
1428 bool_t no_dispatch;
1429
1430 if ((why = __gss_authenticate(r, msg,
1431 &no_dispatch)) != AUTH_OK) {
1432 svcerr_auth(xprt, why);
1433 return (0);
1434 }
1435 if (no_dispatch)
1436 return (0);
1437 }
1438 /* match message with a registered service */
1439 prog_found = FALSE;
1440 low_vers = (rpcvers_t)(0 - 1);
1441 high_vers = 0;
1442 (void) rw_rdlock(&svc_lock);
1443 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1444 if (s->sc_prog == r->rq_prog) {
1445 prog_found = TRUE;
1446 if (s->sc_vers == r->rq_vers) {
1447 if ((xprt->xp_netid == NULL) ||
1448 (s->sc_netid == NULL) ||
1449 (strcmp(xprt->xp_netid,
1450 s->sc_netid) == 0)) {
1451 disp_fn = (*s->sc_dispatch);
1452 (void) rw_unlock(&svc_lock);
1453 disp_fn(r, xprt);
1454 return (1);
1455 }
1456 prog_found = FALSE;
1457 }
1458 if (s->sc_vers < low_vers)
1459 low_vers = s->sc_vers;
1460 if (s->sc_vers > high_vers)
1461 high_vers = s->sc_vers;
1462 } /* found correct program */
1463 }
1464 (void) rw_unlock(&svc_lock);
1465
1466 /*
1467 * if we got here, the program or version
1468 * is not served ...
1469 */
1470 if (prog_found) {
1471 /* LINTED pointer alignment */
1472 if (!version_keepquiet(xprt))
1473 svcerr_progvers(xprt, low_vers, high_vers);
1474 } else {
1475 svcerr_noprog(xprt);
1476 }
1477 return (0);
1478 }
1479
1480 /* ******************* SVCXPRT allocation and deallocation ***************** */
1481
1482 /*
1483 * svc_xprt_alloc() - allocate a service transport handle
1484 */
1485 SVCXPRT *
1486 svc_xprt_alloc(void)
1487 {
1488 SVCXPRT *xprt = NULL;
1489 SVCXPRT_EXT *xt = NULL;
1490 SVCXPRT_LIST *xlist = NULL;
1491 struct rpc_msg *msg = NULL;
1492 struct svc_req *req = NULL;
1493 char *cred_area = NULL;
1494
1495 if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1496 goto err_exit;
1497
1498 if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1499 goto err_exit;
1500 xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1501
1502 if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1503 goto err_exit;
1504 xt->my_xlist = xlist;
1505 xlist->xprt = xprt;
1506
1507 if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1508 goto err_exit;
1509 xt->msg = msg;
1510
1511 if ((req = malloc(sizeof (struct svc_req))) == NULL)
1512 goto err_exit;
1513 xt->req = req;
1514
1515 if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1516 goto err_exit;
1517 xt->cred_area = cred_area;
1518
1519 /* LINTED pointer alignment */
1520 (void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1521 return (xprt);
1522
1523 err_exit:
1524 svc_xprt_free(xprt);
1525 return (NULL);
1526 }
1527
1528
1529 /*
1530 * svc_xprt_free() - free a service handle
1531 */
1532 void
1533 svc_xprt_free(SVCXPRT *xprt)
1534 {
1535 /* LINTED pointer alignment */
1536 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
1537 SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist: NULL;
1538 struct rpc_msg *msg = xt ? xt->msg : NULL;
1539 struct svc_req *req = xt ? xt->req : NULL;
1540 char *cred_area = xt ? xt->cred_area : NULL;
1541
1542 if (xprt)
1543 free(xprt);
1544 if (xt)
1545 free(xt);
1546 if (my_xlist)
1547 free(my_xlist);
1548 if (msg)
1549 free(msg);
1550 if (req)
1551 free(req);
1552 if (cred_area)
1553 free(cred_area);
1554 }
1555
1556
1557 /*
1558 * svc_xprt_destroy() - free parent and child xprt list
1559 */
1560 void
1561 svc_xprt_destroy(SVCXPRT *xprt)
1562 {
1563 SVCXPRT_LIST *xlist, *xnext = NULL;
1564 int type;
1565
1566 /* LINTED pointer alignment */
1567 if (SVCEXT(xprt)->parent)
1568 /* LINTED pointer alignment */
1569 xprt = SVCEXT(xprt)->parent;
1570 /* LINTED pointer alignment */
1571 type = svc_type(xprt);
1572 /* LINTED pointer alignment */
1573 for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1574 xnext = xlist->next;
1575 xprt = xlist->xprt;
1576 switch (type) {
1577 case SVC_DGRAM:
1578 svc_dg_xprtfree(xprt);
1579 break;
1580 case SVC_RENDEZVOUS:
1581 svc_vc_xprtfree(xprt);
1582 break;
1583 case SVC_CONNECTION:
1584 svc_fd_xprtfree(xprt);
1585 break;
1586 case SVC_DOOR:
1587 svc_door_xprtfree(xprt);
1588 break;
1589 }
1590 }
1591 }
1592
1593
1594 /*
1595 * svc_copy() - make a copy of parent
1596 */
1597 SVCXPRT *
1598 svc_copy(SVCXPRT *xprt)
1599 {
1600 /* LINTED pointer alignment */
1601 switch (svc_type(xprt)) {
1602 case SVC_DGRAM:
1603 return (svc_dg_xprtcopy(xprt));
1604 case SVC_RENDEZVOUS:
1605 return (svc_vc_xprtcopy(xprt));
1606 case SVC_CONNECTION:
1607 return (svc_fd_xprtcopy(xprt));
1608 }
1609 return (NULL);
1610 }
1611
1612
1613 /*
1614 * _svc_destroy_private() - private SVC_DESTROY interface
1615 */
1616 void
1617 _svc_destroy_private(SVCXPRT *xprt)
1618 {
1619 /* LINTED pointer alignment */
1620 switch (svc_type(xprt)) {
1621 case SVC_DGRAM:
1622 _svc_dg_destroy_private(xprt);
1623 break;
1624 case SVC_RENDEZVOUS:
1625 case SVC_CONNECTION:
1626 _svc_vc_destroy_private(xprt, TRUE);
1627 break;
1628 }
1629 }
1630
1631 /*
1632 * svc_get_local_cred() - fetch local user credentials. This always
1633 * works over doors based transports. For local transports, this
1634 * does not yield correct results unless the __rpc_negotiate_uid()
1635 * call has been invoked to enable this feature.
1636 */
1637 bool_t
1638 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1639 {
1640 /* LINTED pointer alignment */
1641 if (svc_type(xprt) == SVC_DOOR)
1642 return (__svc_get_door_cred(xprt, lcred));
1643 return (__rpc_get_local_cred(xprt, lcred));
1644 }
1645
1646
1647 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1648
1649 /*
 * The dup caching routines below provide a cache of received
 * transactions.  RPC service routines can use this to detect
 * retransmissions and re-send a non-failure response.  An LRU
 * scheme is used to decide which entries to evict from the cache,
 * though only DUP_DONE entries are placed on the lru list.
 * The routines were written towards development of a generic
 * SVC_DUP() interface, which can be expanded to encompass the
 * svc_dg_enablecache() routines as well.  The cache is currently
 * private to the automounter.
1659 */
1660
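/*
 * Illustrative sketch (an assumption, not part of the library): a service
 * that wants duplicate-request detection keeps a per-transport cache
 * pointer, initializes it once, and brackets each request with
 * __svc_dup() and __svc_dupdone().  The cache pointer and buffer names
 * below are hypothetical.
 *
 *	static char *dupcache;
 *
 *	if (dupcache == NULL &&
 *	    !__svc_dupcache_init(NULL, DUPCACHE_FIXEDTIME, &dupcache))
 *		return;		(duplicate detection unavailable)
 *
 *	switch (__svc_dup(rqstp, &resp_buf, &resp_bufsz, dupcache)) {
 *	case DUP_NEW:
 *		(service the request, send the reply, then:)
 *		(void) __svc_dupdone(rqstp, resp_buf, resp_bufsz,
 *		    DUP_DONE, dupcache);
 *		break;
 *	case DUP_DONE:
 *		(retransmission: resend the cached reply from resp_buf)
 *		break;
 *	case DUP_INPROGRESS:
 *	case DUP_DROP:
 *		(drop the retransmission)
 *		break;
 *	case DUP_ERROR:
 *		(fall back to servicing the request without the cache)
 *		break;
 *	}
 */
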
1661
1662 /* dupcache header contains xprt specific information */
1663 struct dupcache {
1664 rwlock_t dc_lock;
1665 time_t dc_time;
1666 int dc_buckets;
1667 int dc_maxsz;
1668 int dc_basis;
1669 struct dupreq *dc_mru;
1670 struct dupreq **dc_hashtbl;
1671 };
1672
1673 /*
1674 * private duplicate cache request routines
1675 */
1676 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1677 struct dupcache *, uint32_t, uint32_t);
1678 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1679 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1680 struct dupcache *, uint32_t, uint32_t, time_t);
1681 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1682 struct dupcache *, uint32_t, uint32_t);
1683 #ifdef DUP_DEBUG
1684 static void __svc_dupcache_debug(struct dupcache *);
1685 #endif /* DUP_DEBUG */
1686
1687 /* default parameters for the dupcache */
1688 #define DUPCACHE_BUCKETS 257
1689 #define DUPCACHE_TIME 900
1690 #define DUPCACHE_MAXSZ INT_MAX
1691
1692 /*
 * __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
 * Initialize the duprequest cache and assign it to xprt_cache.
 * Use default values depending on the cache condition and basis.
 * Return TRUE on success and FALSE on failure.
1697 */
1698 bool_t
1699 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1700 {
1701 static mutex_t initdc_lock = DEFAULTMUTEX;
1702 int i;
1703 struct dupcache *dc;
1704
1705 (void) mutex_lock(&initdc_lock);
1706 if (*xprt_cache != NULL) { /* do only once per xprt */
1707 (void) mutex_unlock(&initdc_lock);
1708 syslog(LOG_ERR,
1709 "__svc_dupcache_init: multiply defined dup cache");
1710 return (FALSE);
1711 }
1712
1713 switch (basis) {
1714 case DUPCACHE_FIXEDTIME:
1715 dc = malloc(sizeof (struct dupcache));
1716 if (dc == NULL) {
1717 (void) mutex_unlock(&initdc_lock);
1718 syslog(LOG_ERR,
1719 "__svc_dupcache_init: memory alloc failed");
1720 return (FALSE);
1721 }
1722 (void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1723 if (condition != NULL)
1724 dc->dc_time = *((time_t *)condition);
1725 else
1726 dc->dc_time = DUPCACHE_TIME;
1727 dc->dc_buckets = DUPCACHE_BUCKETS;
1728 dc->dc_maxsz = DUPCACHE_MAXSZ;
1729 dc->dc_basis = basis;
1730 dc->dc_mru = NULL;
1731 dc->dc_hashtbl = malloc(dc->dc_buckets *
1732 sizeof (struct dupreq *));
1733 if (dc->dc_hashtbl == NULL) {
1734 free(dc);
1735 (void) mutex_unlock(&initdc_lock);
1736 syslog(LOG_ERR,
1737 "__svc_dupcache_init: memory alloc failed");
1738 return (FALSE);
1739 }
1740 for (i = 0; i < DUPCACHE_BUCKETS; i++)
1741 dc->dc_hashtbl[i] = NULL;
1742 *xprt_cache = (char *)dc;
1743 break;
1744 default:
1745 (void) mutex_unlock(&initdc_lock);
1746 syslog(LOG_ERR,
1747 "__svc_dupcache_init: undefined dup cache basis");
1748 return (FALSE);
1749 }
1750
1751 (void) mutex_unlock(&initdc_lock);
1752
1753 return (TRUE);
1754 }
1755
1756 /*
1757 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1758 * char *xprt_cache)
 * Searches the request cache.  Creates an entry and returns DUP_NEW if
 * the request is not found in the cache.  If it is found, then it
 * returns the state of the request (in progress, drop, or done) and
 * also allocates and passes back results to the user (if any) in
 * resp_buf, and its length in resp_bufsz.  DUP_ERROR is returned on error.
1764 */
1765 int
1766 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1767 char *xprt_cache)
1768 {
1769 uint32_t drxid, drhash;
1770 int rc;
1771 struct dupreq *dr = NULL;
1772 time_t timenow = time(NULL);
1773
1774 /* LINTED pointer alignment */
1775 struct dupcache *dc = (struct dupcache *)xprt_cache;
1776
1777 if (dc == NULL) {
1778 syslog(LOG_ERR, "__svc_dup: undefined cache");
1779 return (DUP_ERROR);
1780 }
1781
1782 /* get the xid of the request */
1783 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1784 syslog(LOG_ERR, "__svc_dup: xid error");
1785 return (DUP_ERROR);
1786 }
1787 drhash = drxid % dc->dc_buckets;
1788
1789 if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1790 drhash)) != DUP_NEW)
1791 return (rc);
1792
1793 if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1794 return (DUP_ERROR);
1795
1796 if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1797 == DUP_ERROR)
1798 return (rc);
1799
1800 return (DUP_NEW);
1801 }
1802
1803
1804
1805 /*
1806 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
 *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
 *		uint32_t drhash)
 * Checks to see whether an entry already exists in the cache.  If it does,
 * copy the results back into resp_buf, if appropriate.  Return the status
 * of the request, or DUP_NEW if the entry is not in the cache.
1812 */
1813 static int
1814 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1815 struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1816 {
1817 struct dupreq *dr = NULL;
1818
1819 (void) rw_rdlock(&(dc->dc_lock));
1820 dr = dc->dc_hashtbl[drhash];
1821 while (dr != NULL) {
1822 if (dr->dr_xid == drxid &&
1823 dr->dr_proc == req->rq_proc &&
1824 dr->dr_prog == req->rq_prog &&
1825 dr->dr_vers == req->rq_vers &&
1826 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1827 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1828 dr->dr_addr.len) == 0) { /* entry found */
1829 if (dr->dr_hash != drhash) {
1830 /* sanity check */
1831 (void) rw_unlock((&dc->dc_lock));
1832 syslog(LOG_ERR,
				    "\n__svc_dupcache_check: hashing error");
1834 return (DUP_ERROR);
1835 }
1836
1837 /*
			 * Return results for requests on the lru list, if
			 * appropriate.  Requests must be DUP_DROP or DUP_DONE
			 * to have a result.  A NULL buffer in the cache
			 * implies no results were sent during dupdone.
			 * A NULL buffer in the call implies the caller is
			 * not interested in results.
1844 */
1845 if (((dr->dr_status == DUP_DONE) ||
1846 (dr->dr_status == DUP_DROP)) &&
1847 resp_buf != NULL &&
1848 dr->dr_resp.buf != NULL) {
1849 *resp_buf = malloc(dr->dr_resp.len);
1850 if (*resp_buf == NULL) {
1851 syslog(LOG_ERR,
1852 "__svc_dupcache_check: malloc failed");
1853 (void) rw_unlock(&(dc->dc_lock));
1854 return (DUP_ERROR);
1855 }
1856 (void) memset(*resp_buf, 0, dr->dr_resp.len);
1857 (void) memcpy(*resp_buf, dr->dr_resp.buf,
1858 dr->dr_resp.len);
1859 *resp_bufsz = dr->dr_resp.len;
1860 } else {
1861 /* no result */
1862 if (resp_buf)
1863 *resp_buf = NULL;
1864 if (resp_bufsz)
1865 *resp_bufsz = 0;
1866 }
1867 (void) rw_unlock(&(dc->dc_lock));
1868 return (dr->dr_status);
1869 }
1870 dr = dr->dr_chain;
1871 }
1872 (void) rw_unlock(&(dc->dc_lock));
1873 return (DUP_NEW);
1874 }
1875
1876 /*
1877 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1878 * Return a victim dupreq entry to the caller, depending on cache policy.
1879 */
1880 static struct dupreq *
1881 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1882 {
1883 struct dupreq *dr = NULL;
1884
1885 switch (dc->dc_basis) {
1886 case DUPCACHE_FIXEDTIME:
1887 /*
1888 * The hash policy is to free up a bit of the hash
1889 * table before allocating a new entry as the victim.
1890 * Freeing up the hash table each time should split
1891 * the cost of keeping the hash table clean among threads.
1892 * Note that only DONE or DROPPED entries are on the lru
1893 * list but we do a sanity check anyway.
1894 */
1895 (void) rw_wrlock(&(dc->dc_lock));
1896 while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1897 ((timenow - dr->dr_time) > dc->dc_time)) {
1898 /* clean and then free the entry */
1899 if (dr->dr_status != DUP_DONE &&
1900 dr->dr_status != DUP_DROP) {
1901 /*
1902 * The LRU list can't contain an
1903 * entry where the status is other than
1904 * DUP_DONE or DUP_DROP.
1905 */
1906 syslog(LOG_ERR,
1907 "__svc_dupcache_victim: bad victim");
1908 #ifdef DUP_DEBUG
1909 /*
1910 * Need to hold the reader/writers lock to
1911 * print the cache info, since we already
1912 * hold the writers lock, we shall continue
1913 * calling __svc_dupcache_debug()
1914 */
1915 __svc_dupcache_debug(dc);
1916 #endif /* DUP_DEBUG */
1917 (void) rw_unlock(&(dc->dc_lock));
1918 return (NULL);
1919 }
1920 /* free buffers */
1921 if (dr->dr_resp.buf) {
1922 free(dr->dr_resp.buf);
1923 dr->dr_resp.buf = NULL;
1924 }
1925 if (dr->dr_addr.buf) {
1926 free(dr->dr_addr.buf);
1927 dr->dr_addr.buf = NULL;
1928 }
1929
1930 /* unhash the entry */
1931 if (dr->dr_chain)
1932 dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1933 if (dr->dr_prevchain)
1934 dr->dr_prevchain->dr_chain = dr->dr_chain;
1935 if (dc->dc_hashtbl[dr->dr_hash] == dr)
1936 dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1937
1938 /* modify the lru pointers */
1939 if (dc->dc_mru == dr) {
1940 dc->dc_mru = NULL;
1941 } else {
1942 dc->dc_mru->dr_next = dr->dr_next;
1943 dr->dr_next->dr_prev = dc->dc_mru;
1944 }
1945 free(dr);
1946 dr = NULL;
1947 }
1948 (void) rw_unlock(&(dc->dc_lock));
1949
1950 /*
1951 * Allocate and return new clean entry as victim
1952 */
1953 if ((dr = malloc(sizeof (*dr))) == NULL) {
1954 syslog(LOG_ERR,
1955 "__svc_dupcache_victim: malloc failed");
1956 return (NULL);
1957 }
1958 (void) memset(dr, 0, sizeof (*dr));
1959 return (dr);
1960 default:
1961 syslog(LOG_ERR,
1962 "__svc_dupcache_victim: undefined dup cache_basis");
1963 return (NULL);
1964 }
1965 }
1966
1967 /*
1968 * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1969 * struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
 * Build a new duprequest entry and insert it into the cache.
1971 */
1972 static int
1973 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1974 struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1975 {
1976 dr->dr_xid = drxid;
1977 dr->dr_prog = req->rq_prog;
1978 dr->dr_vers = req->rq_vers;
1979 dr->dr_proc = req->rq_proc;
1980 dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1981 dr->dr_addr.len = dr->dr_addr.maxlen;
1982 if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1983 syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1984 free(dr);
1985 return (DUP_ERROR);
1986 }
1987 (void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1988 (void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1989 dr->dr_addr.len);
1990 dr->dr_resp.buf = NULL;
1991 dr->dr_resp.maxlen = 0;
1992 dr->dr_resp.len = 0;
1993 dr->dr_status = DUP_INPROGRESS;
1994 dr->dr_time = timenow;
1995 dr->dr_hash = drhash; /* needed for efficient victim cleanup */
1996
1997 /* place entry at head of hash table */
1998 (void) rw_wrlock(&(dc->dc_lock));
1999 dr->dr_chain = dc->dc_hashtbl[drhash];
2000 dr->dr_prevchain = NULL;
2001 if (dc->dc_hashtbl[drhash] != NULL)
2002 dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2003 dc->dc_hashtbl[drhash] = dr;
2004 (void) rw_unlock(&(dc->dc_lock));
2005 return (DUP_NEW);
2006 }
2007
2008 /*
2009 * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2010 * int status, char *xprt_cache)
2011 * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2012 * Only DONE and DROP requests can be marked as done. Sets the lru pointers
2013 * to make the entry the most recently used. Returns DUP_ERROR or status.
2014 */
2015 int
2016 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2017 int status, char *xprt_cache)
2018 {
2019 uint32_t drxid, drhash;
2020 int rc;
2021
2022 /* LINTED pointer alignment */
2023 struct dupcache *dc = (struct dupcache *)xprt_cache;
2024
2025 if (dc == NULL) {
2026 syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2027 return (DUP_ERROR);
2028 }
2029
2030 if (status != DUP_DONE && status != DUP_DROP) {
2031 syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2032 syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
2033 return (DUP_ERROR);
2034 }
2035
2036 /* find the xid of the entry in the cache */
2037 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
		syslog(LOG_ERR, "__svc_dupdone: xid error");
2039 return (DUP_ERROR);
2040 }
2041 drhash = drxid % dc->dc_buckets;
2042
2043 /* update the status of the entry and result buffers, if required */
2044 if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2045 dc, drxid, drhash)) == DUP_ERROR) {
2046 syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2047 return (DUP_ERROR);
2048 }
2049
2050 return (rc);
2051 }
2052
2053 /*
2054 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2055 * uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2056 * uint32_t drhash)
 * Check if the entry exists in the dupcache.  If it does, update its status
 * and time and also its buffer, if appropriate.  It's possible, but unlikely,
 * for DONE requests to not exist in the cache.  Return DUP_ERROR or status.
2060 */
2061 static int
2062 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2063 int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2064 {
2065 struct dupreq *dr = NULL;
2066 time_t timenow = time(NULL);
2067
2068 (void) rw_wrlock(&(dc->dc_lock));
2069 dr = dc->dc_hashtbl[drhash];
2070 while (dr != NULL) {
2071 if (dr->dr_xid == drxid &&
2072 dr->dr_proc == req->rq_proc &&
2073 dr->dr_prog == req->rq_prog &&
2074 dr->dr_vers == req->rq_vers &&
2075 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2076 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2077 dr->dr_addr.len) == 0) { /* entry found */
2078 if (dr->dr_hash != drhash) {
2079 /* sanity check */
2080 (void) rw_unlock(&(dc->dc_lock));
2081 syslog(LOG_ERR,
2082 "\n__svc_dupdone: hashing error");
2083 return (DUP_ERROR);
2084 }
2085
			/* store the results if buffer is not NULL */
2087 if (resp_buf != NULL) {
2088 if ((dr->dr_resp.buf =
2089 malloc(resp_bufsz)) == NULL) {
2090 (void) rw_unlock(&(dc->dc_lock));
2091 syslog(LOG_ERR,
2092 "__svc_dupdone: malloc failed");
2093 return (DUP_ERROR);
2094 }
2095 (void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2096 (void) memcpy(dr->dr_resp.buf, resp_buf,
2097 (uint_t)resp_bufsz);
2098 dr->dr_resp.len = resp_bufsz;
2099 }
2100
2101 /* update status and done time */
2102 dr->dr_status = status;
2103 dr->dr_time = timenow;
2104
2105 /* move the entry to the mru position */
2106 if (dc->dc_mru == NULL) {
2107 dr->dr_next = dr;
2108 dr->dr_prev = dr;
2109 } else {
2110 dr->dr_next = dc->dc_mru->dr_next;
2111 dc->dc_mru->dr_next->dr_prev = dr;
2112 dr->dr_prev = dc->dc_mru;
2113 dc->dc_mru->dr_next = dr;
2114 }
2115 dc->dc_mru = dr;
2116
2117 (void) rw_unlock(&(dc->dc_lock));
2118 return (status);
2119 }
2120 dr = dr->dr_chain;
2121 }
2122 (void) rw_unlock(&(dc->dc_lock));
2123 syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2124 return (DUP_ERROR);
2125 }
2126
2127 #ifdef DUP_DEBUG
2128 /*
2129 * __svc_dupcache_debug(struct dupcache *dc)
 *	print out the hash table and lru list contents
2131 *
2132 * This function requires the caller to hold the reader
2133 * or writer version of the duplicate request cache lock (dc_lock).
2134 */
2135 static void
2136 __svc_dupcache_debug(struct dupcache *dc)
2137 {
2138 struct dupreq *dr = NULL;
2139 int i;
2140 bool_t bval;
2141
2142 fprintf(stderr, " HASHTABLE\n");
2143 for (i = 0; i < dc->dc_buckets; i++) {
2144 bval = FALSE;
2145 dr = dc->dc_hashtbl[i];
2146 while (dr != NULL) {
2147 if (!bval) { /* ensures bucket printed only once */
2148 fprintf(stderr, " bucket : %d\n", i);
2149 bval = TRUE;
2150 }
2151 fprintf(stderr, "\txid: %u status: %d time: %ld",
2152 dr->dr_xid, dr->dr_status, dr->dr_time);
			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
			    (void *)dr, (void *)dr->dr_chain,
			    (void *)dr->dr_prevchain);
2155 dr = dr->dr_chain;
2156 }
2157 }
2158
2159 fprintf(stderr, " LRU\n");
2160 if (dc->dc_mru) {
2161 dr = dc->dc_mru->dr_next; /* lru */
2162 while (dr != dc->dc_mru) {
2163 fprintf(stderr, "\txid: %u status : %d time : %ld",
2164 dr->dr_xid, dr->dr_status, dr->dr_time);
			fprintf(stderr, " dr: %p next: %p prev: %p\n",
			    (void *)dr, (void *)dr->dr_next,
			    (void *)dr->dr_prev);
2167 dr = dr->dr_next;
2168 }
2169 fprintf(stderr, "\txid: %u status: %d time: %ld",
2170 dr->dr_xid, dr->dr_status, dr->dr_time);
		fprintf(stderr, " dr: %p next: %p prev: %p\n",
		    (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2173 }
2174 }
2175 #endif /* DUP_DEBUG */