3772 consider raising default descriptor soft limit
--- old/usr/src/lib/libnsl/rpc/svc.c
+++ new/usr/src/lib/libnsl/rpc/svc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26 /*
27 27 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28 28 */
29 29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 30 /* All Rights Reserved */
31 31 /*
32 32 * Portions of this source code were derived from Berkeley
33 33 * 4.3 BSD under license from the Regents of the University of
34 34 * California.
35 35 */
36 36
37 37 /*
38 38 * svc.c, Server-side remote procedure call interface.
39 39 *
40 40 * There are two sets of procedures here. The xprt routines are
41 41 * for handling transport handles. The svc routines handle the
42 42 * list of service routines.
43 43 *
44 44 */
45 45
46 46 #include "mt.h"
47 47 #include "rpc_mt.h"
48 48 #include <assert.h>
49 49 #include <errno.h>
50 50 #include <sys/types.h>
51 51 #include <stropts.h>
52 52 #include <sys/conf.h>
53 53 #include <rpc/rpc.h>
54 54 #ifdef PORTMAP
55 55 #include <rpc/pmap_clnt.h>
56 56 #endif
57 57 #include <sys/poll.h>
58 58 #include <netconfig.h>
59 59 #include <syslog.h>
60 60 #include <stdlib.h>
61 61 #include <unistd.h>
62 62 #include <string.h>
63 63 #include <limits.h>
64 64
65 65 extern bool_t __svc_get_door_cred();
66 66 extern bool_t __rpc_get_local_cred();
67 67
68 68 SVCXPRT **svc_xports;
69 69 static int nsvc_xports; /* total number of svc_xports allocated */
70 70
71 71 XDR **svc_xdrs; /* common XDR receive area */
72 72 int nsvc_xdrs; /* total number of svc_xdrs allocated */
73 73
74 74 int __rpc_use_pollfd_done; /* to unlimit the number of connections */
75 75
76 76 #define NULL_SVC ((struct svc_callout *)0)
77 77 #define RQCRED_SIZE 400 /* this size is excessive */
78 78
79 79 /*
80 80 * The services list
81 81 * Each entry represents a set of procedures (an rpc program).
82 82 * The dispatch routine takes request structs and runs the
83 83 * appropriate procedure.
84 84 */
85 85 static struct svc_callout {
86 86 struct svc_callout *sc_next;
87 87 rpcprog_t sc_prog;
88 88 rpcvers_t sc_vers;
89 89 char *sc_netid;
90 90 void (*sc_dispatch)();
91 91 } *svc_head;
92 92 extern rwlock_t svc_lock;
93 93
94 94 static struct svc_callout *svc_find();
95 95 int _svc_prog_dispatch();
96 96 void svc_getreq_common();
97 97 char *strdup();
98 98
99 99 extern mutex_t svc_door_mutex;
100 100 extern cond_t svc_door_waitcv;
101 101 extern int svc_ndoorfds;
102 102 extern SVCXPRT_LIST *_svc_xprtlist;
103 103 extern mutex_t xprtlist_lock;
104 104 extern void __svc_rm_from_xlist();
105 105
106 +#if !defined(_LP64)
106 107 extern fd_set _new_svc_fdset;
108 +#endif
107 109
108 110 /*
109 111  * If the allocated array of reactors is too small, it is grown by this
110 112  * margin. This reduces the number of allocations.
111 113 */
112 114 #define USER_FD_INCREMENT 5
113 115
114 116 static void add_pollfd(int fd, short events);
115 117 static void remove_pollfd(int fd);
116 118 static void __svc_remove_input_of_fd(int fd);
117 119
118 120 /*
119 121 * Data used to handle reactor:
120 122 * - one file descriptor we listen to,
121 123 * - one callback we call if the fd pops,
122 124 * - and a cookie passed as a parameter to the callback.
123 125 *
124 126  * The structure is an array indexed by file descriptor. Each entry
125 127  * points to the first element of a doubly-linked list of callbacks.
126 128  * Only one callback may be associated with a given (fd, event) pair.
127 129 */
128 130
129 131 struct _svc_user_fd_head;
130 132
131 133 typedef struct {
132 134 struct _svc_user_fd_node *next;
133 135 struct _svc_user_fd_node *previous;
134 136 } _svc_user_link;
135 137
136 138 typedef struct _svc_user_fd_node {
137 139 _svc_user_link lnk;
138 140 svc_input_id_t id;
139 141 int fd;
140 142 unsigned int events;
141 143 svc_callback_t callback;
142 144 void* cookie;
143 145 } _svc_user_fd_node;
144 146
145 147 typedef struct _svc_user_fd_head {
146 148 struct _svc_user_fd_node *list;
147 149 unsigned int mask; /* logical OR of all sub-masks */
148 150 } _svc_user_fd_head;
149 151
150 152
151 153 /* Array of defined reactors - indexed by file descriptor */
152 154 static _svc_user_fd_head *svc_userfds = NULL;
153 155
154 156 /* current size of the svc_userfds array */
155 157 static int svc_nuserfds = 0;
156 158
157 159 /* Mutex to ensure MT safe operations for user fds callbacks. */
158 160 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
159 161
160 162
161 163 /*
162 164  * This structure is used to have constant-time algorithms. There is an array
163 165 * of this structure as large as svc_nuserfds. When the user is registering a
164 166 * new callback, the address of the created structure is stored in a cell of
165 167 * this array. The address of this cell is the returned unique identifier.
166 168 *
167 169  * On removal, the id is given by the user; then we know whether this cell is
168 170  * filled or not (via the free flag). If it is free, we return an error.
169 171  * Otherwise, we can free the structure pointed to by fd_node.
170 172 *
171 173 * On insertion, we use the linked list created by (first_free,
172 174 * next_free). In this way with a constant time computation, we can give a
173 175 * correct index to the user.
174 176 */
175 177
176 178 typedef struct _svc_management_user_fd {
177 179 bool_t free;
178 180 union {
179 181 svc_input_id_t next_free;
180 182 _svc_user_fd_node *fd_node;
181 183 } data;
182 184 } _svc_management_user_fd;
183 185
184 186 /* index to the first free elem */
185 187 static svc_input_id_t first_free = (svc_input_id_t)-1;
186 188 /* the size of this array is the same as svc_nuserfds */
187 189 static _svc_management_user_fd* user_fd_mgt_array = NULL;
188 190
189 191 /* current size of user_fd_mgt_array */
190 192 static int svc_nmgtuserfds = 0;
191 193
192 194
193 195 /* Define some macros to access data associated to registration ids. */
194 196 #define node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
195 197 #define is_free_id(id) (user_fd_mgt_array[(int)id].free)
196 198
197 199 #ifndef POLLSTANDARD
198 200 #define POLLSTANDARD \
199 201 (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
200 202 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
201 203 #endif
202 204
203 205 /*
204 206 * To free an Id, we set the cell as free and insert its address in the list
205 207  * of free cells.
206 208 */
207 209
208 210 static void
209 211 _svc_free_id(const svc_input_id_t id)
210 212 {
211 213 assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
212 214 user_fd_mgt_array[(int)id].free = TRUE;
213 215 user_fd_mgt_array[(int)id].data.next_free = first_free;
214 216 first_free = id;
215 217 }
216 218
217 219 /*
218 220 * To get a free cell, we just have to take it from the free linked list and
219 221 * set the flag to "not free". This function also allocates new memory if
220 222 * necessary
221 223 */
222 224 static svc_input_id_t
223 225 _svc_attribute_new_id(_svc_user_fd_node *node)
224 226 {
225 227 int selected_index = (int)first_free;
226 228 assert(node != NULL);
227 229
228 230 if (selected_index == -1) {
229 231 /* Allocate new entries */
230 232 int L_inOldSize = svc_nmgtuserfds;
231 233 int i;
232 234 _svc_management_user_fd *tmp;
233 235
234 236 svc_nmgtuserfds += USER_FD_INCREMENT;
235 237
236 238 tmp = realloc(user_fd_mgt_array,
237 239 svc_nmgtuserfds * sizeof (_svc_management_user_fd));
238 240
239 241 if (tmp == NULL) {
240 242 syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
241 243 svc_nmgtuserfds = L_inOldSize;
242 244 errno = ENOMEM;
243 245 return ((svc_input_id_t)-1);
244 246 }
245 247
246 248 user_fd_mgt_array = tmp;
247 249
248 250 for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
249 251 _svc_free_id((svc_input_id_t)i);
250 252 selected_index = (int)first_free;
251 253 }
252 254
253 255 node->id = (svc_input_id_t)selected_index;
254 256 first_free = user_fd_mgt_array[selected_index].data.next_free;
255 257
256 258 user_fd_mgt_array[selected_index].data.fd_node = node;
257 259 user_fd_mgt_array[selected_index].free = FALSE;
258 260
259 261 return ((svc_input_id_t)selected_index);
260 262 }
261 263
262 264 /*
263 265 * Access to a pollfd treatment. Scan all the associated callbacks that have
264 266 * at least one bit in their mask that masks a received event.
265 267 *
266 268 * If event POLLNVAL is received, we check that one callback processes it, if
267 269 * not, then remove the file descriptor from the poll. If there is one, let
268 270 * the user do the work.
269 271 */
270 272 void
271 273 __svc_getreq_user(struct pollfd *pfd)
272 274 {
273 275 int fd = pfd->fd;
274 276 short revents = pfd->revents;
275 277 bool_t invalHandled = FALSE;
276 278 _svc_user_fd_node *node;
277 279
278 280 (void) mutex_lock(&svc_userfds_lock);
279 281
280 282 if ((fd < 0) || (fd >= svc_nuserfds)) {
281 283 (void) mutex_unlock(&svc_userfds_lock);
282 284 return;
283 285 }
284 286
285 287 node = svc_userfds[fd].list;
286 288
287 289 /* check if at least one mask fits */
288 290 if (0 == (revents & svc_userfds[fd].mask)) {
289 291 (void) mutex_unlock(&svc_userfds_lock);
290 292 return;
291 293 }
292 294
293 295 while ((svc_userfds[fd].mask != 0) && (node != NULL)) {
294 296 /*
295 297 * If one of the received events maps the ones the node listens
296 298 * to
297 299 */
298 300 _svc_user_fd_node *next = node->lnk.next;
299 301
300 302 if (node->callback != NULL) {
301 303 if (node->events & revents) {
302 304 if (revents & POLLNVAL) {
303 305 invalHandled = TRUE;
304 306 }
305 307
306 308 /*
307 309 * The lock must be released before calling the
308 310 * user function, as this function can call
309 311 * svc_remove_input() for example.
310 312 */
311 313 (void) mutex_unlock(&svc_userfds_lock);
312 314 node->callback(node->id, node->fd,
313 315 node->events & revents, node->cookie);
314 316 /*
315 317 * Do not use the node structure anymore, as it
316 318 * could have been deallocated by the previous
317 319 * callback.
318 320 */
319 321 (void) mutex_lock(&svc_userfds_lock);
320 322 }
321 323 }
322 324 node = next;
323 325 }
324 326
325 327 if ((revents & POLLNVAL) && !invalHandled)
326 328 __svc_remove_input_of_fd(fd);
327 329 (void) mutex_unlock(&svc_userfds_lock);
328 330 }
329 331
330 332
331 333 /*
332 334 * Check if a file descriptor is associated with a user reactor.
333 335  * To do this, just check that the array indexed on fd has a non-empty linked
334 336  * list (i.e. its first element is not NULL).
335 337 */
336 338 bool_t
337 339 __is_a_userfd(int fd)
338 340 {
339 341 /* Checks argument */
340 342 if ((fd < 0) || (fd >= svc_nuserfds))
341 343 return (FALSE);
342 344 return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
343 345 }
344 346
345 347 /* free everything concerning user fds */
346 348 /* used in svc_run.c => not static */
347 349
348 350 void
349 351 __destroy_userfd(void)
350 352 {
351 353 int one_fd;
352 354 /* Clean user fd */
353 355 if (svc_userfds != NULL) {
354 356 for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
355 357 _svc_user_fd_node *node;
356 358
357 359 node = svc_userfds[one_fd].list;
358 360 while (node != NULL) {
359 361 _svc_user_fd_node *tmp = node;
360 362 _svc_free_id(node->id);
361 363 node = node->lnk.next;
362 364 free(tmp);
363 365 }
364 366 }
365 367
366 368 free(user_fd_mgt_array);
367 369 user_fd_mgt_array = NULL;
368 370 first_free = (svc_input_id_t)-1;
369 371
370 372 free(svc_userfds);
371 373 svc_userfds = NULL;
372 374 svc_nuserfds = 0;
373 375 }
374 376 }
375 377
376 378 /*
377 379  * Remove all the callbacks associated with an fd => useful when the fd is
378 380 * closed for instance
379 381 */
380 382 static void
381 383 __svc_remove_input_of_fd(int fd)
382 384 {
383 385 _svc_user_fd_node **pnode;
384 386 _svc_user_fd_node *tmp;
385 387
386 388 if ((fd < 0) || (fd >= svc_nuserfds))
387 389 return;
388 390
389 391 pnode = &svc_userfds[fd].list;
390 392 while ((tmp = *pnode) != NULL) {
391 393 *pnode = tmp->lnk.next;
392 394
393 395 _svc_free_id(tmp->id);
394 396 free(tmp);
395 397 }
396 398
397 399 svc_userfds[fd].mask = 0;
398 400 }
399 401
400 402 /*
401 403  * Allow the user to add an fd to the poll list. If it does not succeed,
402 404  * return -1. Otherwise, return a svc_id.
403 405 */
404 406
405 407 svc_input_id_t
406 408 svc_add_input(int user_fd, unsigned int events,
407 409 svc_callback_t user_callback, void *cookie)
408 410 {
409 411 _svc_user_fd_node *new_node;
410 412
411 413 if (user_fd < 0) {
412 414 errno = EINVAL;
413 415 return ((svc_input_id_t)-1);
414 416 }
415 417
416 418 if ((events == 0x0000) ||
417 419 (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
418 420 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
419 421 errno = EINVAL;
420 422 return ((svc_input_id_t)-1);
421 423 }
422 424
423 425 (void) mutex_lock(&svc_userfds_lock);
424 426
425 427 if ((user_fd < svc_nuserfds) &&
426 428 (svc_userfds[user_fd].mask & events) != 0) {
427 429 		/* Already registered callback */
428 430 errno = EEXIST;
429 431 (void) mutex_unlock(&svc_userfds_lock);
430 432 return ((svc_input_id_t)-1);
431 433 }
432 434
433 435 /* Handle memory allocation. */
434 436 if (user_fd >= svc_nuserfds) {
435 437 int oldSize = svc_nuserfds;
436 438 int i;
437 439 _svc_user_fd_head *tmp;
438 440
439 441 svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
440 442
441 443 tmp = realloc(svc_userfds,
442 444 svc_nuserfds * sizeof (_svc_user_fd_head));
443 445
444 446 if (tmp == NULL) {
445 447 syslog(LOG_ERR, "svc_add_input: out of memory");
446 448 svc_nuserfds = oldSize;
447 449 errno = ENOMEM;
448 450 (void) mutex_unlock(&svc_userfds_lock);
449 451 return ((svc_input_id_t)-1);
450 452 }
451 453
452 454 svc_userfds = tmp;
453 455
454 456 for (i = oldSize; i < svc_nuserfds; i++) {
455 457 svc_userfds[i].list = NULL;
456 458 svc_userfds[i].mask = 0;
457 459 }
458 460 }
459 461
460 462 new_node = malloc(sizeof (_svc_user_fd_node));
461 463 if (new_node == NULL) {
462 464 syslog(LOG_ERR, "svc_add_input: out of memory");
463 465 errno = ENOMEM;
464 466 (void) mutex_unlock(&svc_userfds_lock);
465 467 return ((svc_input_id_t)-1);
466 468 }
467 469
468 470 /* create a new node */
469 471 new_node->fd = user_fd;
470 472 new_node->events = events;
471 473 new_node->callback = user_callback;
472 474 new_node->cookie = cookie;
473 475
474 476 if (_svc_attribute_new_id(new_node) == -1) {
475 477 (void) mutex_unlock(&svc_userfds_lock);
476 478 free(new_node);
477 479 return ((svc_input_id_t)-1);
478 480 }
479 481
480 482 /* Add the new element at the beginning of the list. */
481 483 if (svc_userfds[user_fd].list != NULL)
482 484 svc_userfds[user_fd].list->lnk.previous = new_node;
483 485 new_node->lnk.next = svc_userfds[user_fd].list;
484 486 new_node->lnk.previous = NULL;
485 487
486 488 svc_userfds[user_fd].list = new_node;
487 489
488 490 	/* refresh global mask for this file descriptor */
489 491 svc_userfds[user_fd].mask |= events;
490 492
491 493 /* refresh mask for the poll */
492 494 add_pollfd(user_fd, (svc_userfds[user_fd].mask));
493 495
494 496 (void) mutex_unlock(&svc_userfds_lock);
495 497 return (new_node->id);
496 498 }
497 499
498 500 int
499 501 svc_remove_input(svc_input_id_t id)
500 502 {
501 503 _svc_user_fd_node* node;
502 504 _svc_user_fd_node* next;
503 505 _svc_user_fd_node* previous;
504 506 int fd; /* caching optim */
505 507
506 508 (void) mutex_lock(&svc_userfds_lock);
507 509
508 510 /* Immediately update data for id management */
509 511 if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
510 512 is_free_id(id)) {
511 513 errno = EINVAL;
512 514 (void) mutex_unlock(&svc_userfds_lock);
513 515 return (-1);
514 516 }
515 517
516 518 node = node_from_id(id);
517 519 assert(node != NULL);
518 520
519 521 _svc_free_id(id);
520 522 next = node->lnk.next;
521 523 previous = node->lnk.previous;
522 524 fd = node->fd; /* caching optim */
523 525
524 526 /* Remove this node from the list. */
525 527 if (previous != NULL) {
526 528 previous->lnk.next = next;
527 529 } else {
528 530 assert(svc_userfds[fd].list == node);
529 531 svc_userfds[fd].list = next;
530 532 }
531 533 if (next != NULL)
532 534 next->lnk.previous = previous;
533 535
534 536 /* Remove the node flags from the global mask */
535 537 svc_userfds[fd].mask ^= node->events;
536 538
537 539 free(node);
538 540 if (svc_userfds[fd].mask == 0) {
539 541 assert(svc_userfds[fd].list == NULL);
540 542 remove_pollfd(fd);
541 543 } else {
542 544 assert(svc_userfds[fd].list != NULL);
543 545 }
544 546 /* <=> CLEAN NEEDED TO SHRINK MEMORY USAGE */
545 547
546 548 (void) mutex_unlock(&svc_userfds_lock);
547 549 return (0);
548 550 }
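
A minimal usage sketch of the svc_add_input()/svc_remove_input() interface
above (the descriptor, event mask, and callback body are hypothetical; the
callback signature follows the invocation in __svc_getreq_user()):

	#include <sys/poll.h>
	#include <rpc/rpc.h>

	/* hypothetical callback, per the call in __svc_getreq_user() */
	static void
	my_callback(svc_input_id_t id, int fd, unsigned int events,
	    void *cookie)
	{
		/* consume the input on fd; may call svc_remove_input(id) */
	}

	static void
	watch_fd(int my_fd)
	{
		svc_input_id_t id;

		id = svc_add_input(my_fd, POLLIN, my_callback, NULL);
		if ((int)id == -1)
			return;	/* errno is EINVAL, EEXIST or ENOMEM */
		/* ... later, when the fd is no longer interesting: */
		(void) svc_remove_input(id);
	}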
549 551
550 552 /*
551 553 * Provides default service-side functions for authentication flavors
552 554 * that do not use all the fields in struct svc_auth_ops.
553 555 */
554 556
555 557 /*ARGSUSED*/
556 558 static int
557 559 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
558 560 {
559 561 return (*xfunc)(xdrs, xwhere);
560 562 }
561 563
562 564 struct svc_auth_ops svc_auth_any_ops = {
563 565 authany_wrap,
564 566 authany_wrap,
565 567 };
566 568
567 569 /*
568 570 * Return pointer to server authentication structure.
569 571 */
570 572 SVCAUTH *
571 573 __svc_get_svcauth(SVCXPRT *xprt)
572 574 {
573 575 /* LINTED pointer alignment */
574 576 return (&SVC_XP_AUTH(xprt));
575 577 }
576 578
577 579 /*
578 580 * A callback routine to cleanup after a procedure is executed.
579 581 */
580 582 void (*__proc_cleanup_cb)() = NULL;
581 583
582 584 void *
583 585 __svc_set_proc_cleanup_cb(void *cb)
584 586 {
585 587 void *tmp = (void *)__proc_cleanup_cb;
586 588
587 589 __proc_cleanup_cb = (void (*)())cb;
588 590 return (tmp);
589 591 }
590 592
591 593 /* *************** SVCXPRT related stuff **************** */
592 594
593 595
594 596 static int pollfd_shrinking = 1;
595 597
596 598
597 599 /*
598 600 * Add fd to svc_pollfd
599 601 */
600 602 static void
601 603 add_pollfd(int fd, short events)
602 604 {
603 605 if (fd < FD_SETSIZE) {
604 606 FD_SET(fd, &svc_fdset);
605 607 #if !defined(_LP64)
606 608 FD_SET(fd, &_new_svc_fdset);
607 609 #endif
608 610 svc_nfds++;
609 611 svc_nfds_set++;
610 612 if (fd >= svc_max_fd)
611 613 svc_max_fd = fd + 1;
612 614 }
613 615 if (fd >= svc_max_pollfd)
614 616 svc_max_pollfd = fd + 1;
615 617 if (svc_max_pollfd > svc_pollfd_allocd) {
616 618 int i = svc_pollfd_allocd;
617 619 pollfd_t *tmp;
618 620 do {
619 621 svc_pollfd_allocd += POLLFD_EXTEND;
620 622 } while (svc_max_pollfd > svc_pollfd_allocd);
621 623 tmp = realloc(svc_pollfd,
622 624 sizeof (pollfd_t) * svc_pollfd_allocd);
623 625 if (tmp != NULL) {
624 626 svc_pollfd = tmp;
625 627 for (; i < svc_pollfd_allocd; i++)
626 628 POLLFD_CLR(i, tmp);
627 629 } else {
628 630 /*
629 631 			 * Give an error message; undo the fdset setting
630 632 			 * above; reset the pollfd_shrinking flag.
631 633 			 * Because of this, poll will not be done
632 634 			 * on these fds.
633 635 */
634 636 if (fd < FD_SETSIZE) {
635 637 FD_CLR(fd, &svc_fdset);
636 638 #if !defined(_LP64)
637 639 FD_CLR(fd, &_new_svc_fdset);
638 640 #endif
639 641 svc_nfds--;
640 642 svc_nfds_set--;
641 643 if (fd == (svc_max_fd - 1))
642 644 svc_max_fd--;
643 645 }
644 646 if (fd == (svc_max_pollfd - 1))
645 647 svc_max_pollfd--;
646 648 pollfd_shrinking = 0;
647 649 syslog(LOG_ERR, "add_pollfd: out of memory");
648 650 _exit(1);
649 651 }
650 652 }
651 653 svc_pollfd[fd].fd = fd;
652 654 svc_pollfd[fd].events = events;
653 655 svc_npollfds++;
654 656 svc_npollfds_set++;
655 657 }
656 658
657 659 /*
658 660 * the fd is still active but only the bit in fdset is cleared.
659 661 * do not subtract svc_nfds or svc_npollfds
660 662 */
661 663 void
662 664 clear_pollfd(int fd)
663 665 {
664 666 if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
665 667 FD_CLR(fd, &svc_fdset);
666 668 #if !defined(_LP64)
667 669 FD_CLR(fd, &_new_svc_fdset);
668 670 #endif
669 671 svc_nfds_set--;
670 672 }
671 673 if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
672 674 POLLFD_CLR(fd, svc_pollfd);
673 675 svc_npollfds_set--;
674 676 }
675 677 }
676 678
677 679 /*
678 680  * sets the bit in fdset for an active fd so that poll() is done for that fd
679 681 */
680 682 void
681 683 set_pollfd(int fd, short events)
682 684 {
683 685 if (fd < FD_SETSIZE) {
684 686 FD_SET(fd, &svc_fdset);
685 687 #if !defined(_LP64)
686 688 FD_SET(fd, &_new_svc_fdset);
687 689 #endif
688 690 svc_nfds_set++;
689 691 }
690 692 if (fd < svc_pollfd_allocd) {
691 693 svc_pollfd[fd].fd = fd;
692 694 svc_pollfd[fd].events = events;
693 695 svc_npollfds_set++;
694 696 }
695 697 }
696 698
697 699 /*
698 700 * remove a svc_pollfd entry; it does not shrink the memory
699 701 */
700 702 static void
701 703 remove_pollfd(int fd)
702 704 {
703 705 clear_pollfd(fd);
704 706 if (fd == (svc_max_fd - 1))
705 707 svc_max_fd--;
706 708 svc_nfds--;
707 709 if (fd == (svc_max_pollfd - 1))
708 710 svc_max_pollfd--;
709 711 svc_npollfds--;
710 712 }
711 713
712 714 /*
713 715 * delete a svc_pollfd entry; it shrinks the memory
714 716 * use remove_pollfd if you do not want to shrink
715 717 */
716 718 static void
717 719 delete_pollfd(int fd)
718 720 {
719 721 remove_pollfd(fd);
720 722 if (pollfd_shrinking && svc_max_pollfd <
721 723 (svc_pollfd_allocd - POLLFD_SHRINK)) {
722 724 do {
723 725 svc_pollfd_allocd -= POLLFD_SHRINK;
724 726 } while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
725 727 svc_pollfd = realloc(svc_pollfd,
726 728 sizeof (pollfd_t) * svc_pollfd_allocd);
727 729 if (svc_pollfd == NULL) {
728 730 syslog(LOG_ERR, "delete_pollfd: out of memory");
729 731 _exit(1);
730 732 }
731 733 }
732 734 }
733 735
734 736
735 737 /*
736 738 * Activate a transport handle.
737 739 */
738 740 void
739 741 xprt_register(const SVCXPRT *xprt)
740 742 {
741 743 int fd = xprt->xp_fd;
742 744 #ifdef CALLBACK
743 745 extern void (*_svc_getreqset_proc)();
744 746 #endif
745 747 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
746 748
747 749 (void) rw_wrlock(&svc_fd_lock);
748 750 if (svc_xports == NULL) {
749 751 /* allocate some small amount first */
750 752 svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
751 753 if (svc_xports == NULL) {
752 754 syslog(LOG_ERR, "xprt_register: out of memory");
753 755 _exit(1);
754 756 }
755 757 nsvc_xports = FD_INCREMENT;
756 758
757 759 #ifdef CALLBACK
758 760 /*
759 761 * XXX: This code does not keep track of the server state.
760 762 *
761 763 * This provides for callback support. When a client
762 764 * recv's a call from another client on the server fd's,
763 765 * it calls _svc_getreqset_proc() which would return
764 766 * after serving all the server requests. Also look under
765 767 * clnt_dg.c and clnt_vc.c (clnt_call part of it)
766 768 */
767 769 _svc_getreqset_proc = svc_getreq_poll;
768 770 #endif
769 771 }
770 772
771 773 while (fd >= nsvc_xports) {
772 774 SVCXPRT **tmp_xprts = svc_xports;
773 775
774 776 /* time to expand svc_xprts */
775 777 tmp_xprts = realloc(svc_xports,
776 778 sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
777 779 if (tmp_xprts == NULL) {
778 780 syslog(LOG_ERR, "xprt_register : out of memory.");
779 781 _exit(1);
780 782 }
781 783
782 784 svc_xports = tmp_xprts;
783 785 (void) memset(&svc_xports[nsvc_xports], 0,
784 786 sizeof (SVCXPRT *) * FD_INCREMENT);
785 787 nsvc_xports += FD_INCREMENT;
786 788 }
787 789
788 790 svc_xports[fd] = (SVCXPRT *)xprt;
789 791
790 792 add_pollfd(fd, MASKVAL);
791 793
792 794 if (svc_polling) {
793 795 char dummy;
794 796
795 797 /*
796 798 * This happens only in one of the MT modes.
797 799 * Wake up poller.
798 800 */
799 801 (void) write(svc_pipe[1], &dummy, sizeof (dummy));
800 802 }
801 803 /*
802 804 * If already dispatching door based services, start
803 805 * dispatching TLI based services now.
804 806 */
805 807 (void) mutex_lock(&svc_door_mutex);
806 808 if (svc_ndoorfds > 0)
807 809 (void) cond_signal(&svc_door_waitcv);
808 810 (void) mutex_unlock(&svc_door_mutex);
809 811
810 812 if (svc_xdrs == NULL) {
811 813 /* allocate initial chunk */
812 814 svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
813 815 if (svc_xdrs != NULL)
814 816 nsvc_xdrs = FD_INCREMENT;
815 817 else {
816 818 syslog(LOG_ERR, "xprt_register : out of memory.");
817 819 _exit(1);
818 820 }
819 821 }
820 822 (void) rw_unlock(&svc_fd_lock);
821 823 }
822 824
823 825 /*
824 826 * De-activate a transport handle.
825 827 */
826 828 void
827 829 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
828 830 {
829 831 int fd = xprt->xp_fd;
830 832
831 833 if (lock_not_held)
832 834 (void) rw_wrlock(&svc_fd_lock);
833 835 if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
834 836 svc_xports[fd] = NULL;
835 837 delete_pollfd(fd);
836 838 }
837 839 if (lock_not_held)
838 840 (void) rw_unlock(&svc_fd_lock);
839 841 __svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
840 842 }
841 843
842 844 void
843 845 xprt_unregister(const SVCXPRT *xprt)
844 846 {
845 847 __xprt_unregister_private(xprt, TRUE);
846 848 }
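
For orientation, a simplified sketch of how a transport constructor would
pair svc_xprt_alloc() (defined later in this file) with xprt_register(),
omitting all transport-specific setup:

	static SVCXPRT *
	make_handle(int fd)
	{
		SVCXPRT *xprt = svc_xprt_alloc();

		if (xprt == NULL)
			return (NULL);
		xprt->xp_fd = fd;
		xprt_register(xprt);	/* adds fd via add_pollfd() */
		return (xprt);
	}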
847 849
848 850 /* ********************** CALLOUT list related stuff ************* */
849 851
850 852 /*
851 853 * Add a service program to the callout list.
852 854 * The dispatch routine will be called when a rpc request for this
853 855 * program number comes in.
854 856 */
855 857 bool_t
856 858 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
857 - void (*dispatch)(), const struct netconfig *nconf)
859 + void (*dispatch)(), const struct netconfig *nconf)
858 860 {
859 861 struct svc_callout *prev;
860 862 struct svc_callout *s, **s2;
861 863 struct netconfig *tnconf;
862 864 char *netid = NULL;
863 865 int flag = 0;
864 866
865 867 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
866 868
867 869 if (xprt->xp_netid) {
868 870 netid = strdup(xprt->xp_netid);
869 871 flag = 1;
870 872 } else if (nconf && nconf->nc_netid) {
871 873 netid = strdup(nconf->nc_netid);
872 874 flag = 1;
873 875 } else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
874 876 != NULL) {
875 877 netid = strdup(tnconf->nc_netid);
876 878 flag = 1;
877 879 freenetconfigent(tnconf);
878 880 } /* must have been created with svc_raw_create */
879 881 if ((netid == NULL) && (flag == 1))
880 882 return (FALSE);
881 883
882 884 (void) rw_wrlock(&svc_lock);
883 885 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
884 886 if (netid)
885 887 free(netid);
886 888 if (s->sc_dispatch == dispatch)
887 889 			goto rpcb_it; /* he is registering another xprt */
888 890 (void) rw_unlock(&svc_lock);
889 891 return (FALSE);
890 892 }
891 893 s = malloc(sizeof (struct svc_callout));
892 894 if (s == NULL) {
893 895 if (netid)
894 896 free(netid);
895 897 (void) rw_unlock(&svc_lock);
896 898 return (FALSE);
897 899 }
898 900
899 901 s->sc_prog = prog;
900 902 s->sc_vers = vers;
901 903 s->sc_dispatch = dispatch;
902 904 s->sc_netid = netid;
903 905 s->sc_next = NULL;
904 906
905 907 /*
906 908 * The ordering of transports is such that the most frequently used
907 909 * one appears first. So add the new entry to the end of the list.
908 910 */
909 911 for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
910 912 ;
911 913 *s2 = s;
912 914
913 915 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
914 916 if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
915 917 syslog(LOG_ERR, "svc_reg : strdup failed.");
916 918 free(netid);
917 919 free(s);
918 920 *s2 = NULL;
919 921 (void) rw_unlock(&svc_lock);
920 922 return (FALSE);
921 923 }
922 924
923 925 rpcb_it:
924 926 (void) rw_unlock(&svc_lock);
925 927
926 928 /* now register the information with the local binder service */
927 929 if (nconf)
928 930 return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
929 931 return (TRUE);
930 932 /*NOTREACHED*/
931 933 }
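
Typical server setup ends in the svc_reg() call above; a sketch with a
hypothetical program number, version, and dispatch routine (svc_tli_create()
and svc_run() are the usual TI-RPC companions from this library):

	#include <stdlib.h>
	#include <rpc/rpc.h>
	#include <netconfig.h>

	#define	PROGNUM	0x20000099	/* hypothetical program number */
	#define	VERSNUM	1

	static void prog_dispatch(struct svc_req *, SVCXPRT *);

	static void
	server_setup(void)
	{
		struct netconfig *nconf = getnetconfigent("tcp");
		SVCXPRT *xprt;

		if (nconf == NULL)
			exit(1);
		xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
		if (xprt == NULL ||
		    !svc_reg(xprt, PROGNUM, VERSNUM, prog_dispatch, nconf))
			exit(1);
		freenetconfigent(nconf);
		svc_run();		/* does not return */
	}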
932 934
933 935 /*
934 936 * Remove a service program from the callout list.
935 937 */
936 938 void
937 939 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
938 940 {
939 941 struct svc_callout *prev;
940 942 struct svc_callout *s;
941 943
942 944 /* unregister the information anyway */
943 945 (void) rpcb_unset(prog, vers, NULL);
944 946
945 947 (void) rw_wrlock(&svc_lock);
946 948 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
947 949 if (prev == NULL_SVC) {
948 950 svc_head = s->sc_next;
949 951 } else {
950 952 prev->sc_next = s->sc_next;
951 953 }
952 954 s->sc_next = NULL_SVC;
953 955 if (s->sc_netid)
954 956 free(s->sc_netid);
955 957 free(s);
956 958 }
957 959 (void) rw_unlock(&svc_lock);
958 960 }
959 961
960 962 #ifdef PORTMAP
961 963 /*
962 964 * Add a service program to the callout list.
963 965 * The dispatch routine will be called when a rpc request for this
964 966 * program number comes in.
965 967 * For version 2 portmappers.
966 968 */
967 969 bool_t
968 970 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
969 - void (*dispatch)(), int protocol)
971 + void (*dispatch)(), int protocol)
970 972 {
971 973 struct svc_callout *prev;
972 974 struct svc_callout *s;
973 975 struct netconfig *nconf;
974 976 char *netid = NULL;
975 977 int flag = 0;
976 978
977 979 if (xprt->xp_netid) {
978 980 netid = strdup(xprt->xp_netid);
979 981 flag = 1;
980 982 } else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
981 983 __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
982 984 /* fill in missing netid field in SVCXPRT */
983 985 netid = strdup(nconf->nc_netid);
984 986 flag = 1;
985 987 freenetconfigent(nconf);
986 988 } /* must be svc_raw_create */
987 989
988 990 if ((netid == NULL) && (flag == 1))
989 991 return (FALSE);
990 992
991 993 (void) rw_wrlock(&svc_lock);
992 994 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
993 995 if (netid)
994 996 free(netid);
995 997 if (s->sc_dispatch == dispatch)
996 998 			goto pmap_it; /* he is registering another xprt */
997 999 (void) rw_unlock(&svc_lock);
998 1000 return (FALSE);
999 1001 }
1000 1002 s = malloc(sizeof (struct svc_callout));
1001 1003 if (s == (struct svc_callout *)0) {
1002 1004 if (netid)
1003 1005 free(netid);
1004 1006 (void) rw_unlock(&svc_lock);
1005 1007 return (FALSE);
1006 1008 }
1007 1009 s->sc_prog = prog;
1008 1010 s->sc_vers = vers;
1009 1011 s->sc_dispatch = dispatch;
1010 1012 s->sc_netid = netid;
1011 1013 s->sc_next = svc_head;
1012 1014 svc_head = s;
1013 1015
1014 1016 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1015 1017 if ((xprt->xp_netid = strdup(netid)) == NULL) {
1016 1018 syslog(LOG_ERR, "svc_register : strdup failed.");
1017 1019 free(netid);
1018 1020 svc_head = s->sc_next;
1019 1021 free(s);
1020 1022 (void) rw_unlock(&svc_lock);
1021 1023 return (FALSE);
1022 1024 }
1023 1025
1024 1026 pmap_it:
1025 1027 (void) rw_unlock(&svc_lock);
1026 1028 /* now register the information with the local binder service */
1027 1029 if (protocol)
1028 1030 return (pmap_set(prog, vers, protocol, xprt->xp_port));
1029 1031 return (TRUE);
1030 1032 }
1031 1033
1032 1034 /*
1033 1035 * Remove a service program from the callout list.
1034 1036 * For version 2 portmappers.
1035 1037 */
1036 1038 void
1037 1039 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1038 1040 {
1039 1041 struct svc_callout *prev;
1040 1042 struct svc_callout *s;
1041 1043
1042 1044 (void) rw_wrlock(&svc_lock);
1043 1045 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1044 1046 if (prev == NULL_SVC) {
1045 1047 svc_head = s->sc_next;
1046 1048 } else {
1047 1049 prev->sc_next = s->sc_next;
1048 1050 }
1049 1051 s->sc_next = NULL_SVC;
1050 1052 if (s->sc_netid)
1051 1053 free(s->sc_netid);
1052 1054 free(s);
1053 1055 /* unregister the information with the local binder service */
1054 1056 (void) pmap_unset(prog, vers);
1055 1057 }
1056 1058 (void) rw_unlock(&svc_lock);
1057 1059 }
1058 1060 #endif /* PORTMAP */
1059 1061
1060 1062 /*
1061 1063 * Search the callout list for a program number, return the callout
1062 1064 * struct.
1063 1065  * Also check the transport. Many routines such as svc_unreg
1064 1066  * don't supply a corresponding transport, so don't check for transport if
1065 1067  * netid == NULL
1066 1068 */
1067 1069 static struct svc_callout *
1068 1070 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1069 1071 {
1070 1072 struct svc_callout *s, *p;
1071 1073
1072 1074 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1073 1075
1074 1076 /* assert(RW_WRITE_HELD(&svc_lock)); */
1075 1077 p = NULL_SVC;
1076 1078 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1077 1079 if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1078 1080 ((netid == NULL) || (s->sc_netid == NULL) ||
1079 1081 (strcmp(netid, s->sc_netid) == 0)))
1080 1082 break;
1081 1083 p = s;
1082 1084 }
1083 1085 *prev = p;
1084 1086 return (s);
1085 1087 }
1086 1088
1087 1089
1088 1090 /* ******************* REPLY GENERATION ROUTINES ************ */
1089 1091
1090 1092 /*
1091 1093 * Send a reply to an rpc request
1092 1094 */
1093 1095 bool_t
1094 1096 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1095 - const caddr_t xdr_location)
1097 + const caddr_t xdr_location)
1096 1098 {
1097 1099 struct rpc_msg rply;
1098 1100
1099 1101 rply.rm_direction = REPLY;
1100 1102 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1101 1103 rply.acpted_rply.ar_verf = xprt->xp_verf;
1102 1104 rply.acpted_rply.ar_stat = SUCCESS;
1103 1105 rply.acpted_rply.ar_results.where = xdr_location;
1104 1106 rply.acpted_rply.ar_results.proc = xdr_results;
1105 1107 return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1106 1108 }
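
svc_sendreply() and the svcerr_*() routines below are meant to be called
from a service dispatch function; a hedged sketch (MYPROC and the int
argument/result are hypothetical):

	#define	MYPROC	1	/* hypothetical procedure number */

	static void
	prog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
	{
		int arg, res;

		switch (rqstp->rq_proc) {
		case NULLPROC:
			(void) svc_sendreply(xprt, (xdrproc_t)xdr_void, NULL);
			return;
		case MYPROC:
			if (!svc_getargs(xprt, (xdrproc_t)xdr_int,
			    (caddr_t)&arg)) {
				svcerr_decode(xprt);
				return;
			}
			res = arg + 1;
			if (!svc_sendreply(xprt, (xdrproc_t)xdr_int,
			    (caddr_t)&res))
				svcerr_systemerr(xprt);
			(void) svc_freeargs(xprt, (xdrproc_t)xdr_int,
			    (caddr_t)&arg);
			return;
		default:
			svcerr_noproc(xprt);
			return;
		}
	}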
1107 1109
1108 1110 /*
1109 1111 * No procedure error reply
1110 1112 */
1111 1113 void
1112 1114 svcerr_noproc(const SVCXPRT *xprt)
1113 1115 {
1114 1116 struct rpc_msg rply;
1115 1117
1116 1118 rply.rm_direction = REPLY;
1117 1119 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1118 1120 rply.acpted_rply.ar_verf = xprt->xp_verf;
1119 1121 rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1120 1122 SVC_REPLY((SVCXPRT *)xprt, &rply);
1121 1123 }
1122 1124
1123 1125 /*
1124 1126 * Can't decode args error reply
1125 1127 */
1126 1128 void
1127 1129 svcerr_decode(const SVCXPRT *xprt)
1128 1130 {
1129 1131 struct rpc_msg rply;
1130 1132
1131 1133 rply.rm_direction = REPLY;
1132 1134 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1133 1135 rply.acpted_rply.ar_verf = xprt->xp_verf;
1134 1136 rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1135 1137 SVC_REPLY((SVCXPRT *)xprt, &rply);
1136 1138 }
1137 1139
1138 1140 /*
1139 1141 * Some system error
1140 1142 */
1141 1143 void
1142 1144 svcerr_systemerr(const SVCXPRT *xprt)
1143 1145 {
1144 1146 struct rpc_msg rply;
1145 1147
1146 1148 rply.rm_direction = REPLY;
1147 1149 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1148 1150 rply.acpted_rply.ar_verf = xprt->xp_verf;
1149 1151 rply.acpted_rply.ar_stat = SYSTEM_ERR;
1150 1152 SVC_REPLY((SVCXPRT *)xprt, &rply);
1151 1153 }
1152 1154
1153 1155 /*
1154 1156  * Tell the RPC package not to complain about version errors to the client.
1155 1157  * This is useful when revving broadcast protocols that sit on a fixed address.
1156 1158 * There is really one (or should be only one) example of this kind of
1157 1159 * protocol: the portmapper (or rpc binder).
1158 1160 */
1159 1161 void
1160 1162 __svc_versquiet_on(const SVCXPRT *xprt)
1161 1163 {
1162 1164 /* LINTED pointer alignment */
1163 1165 svc_flags(xprt) |= SVC_VERSQUIET;
1164 1166 }
1165 1167
1166 1168 void
1167 1169 __svc_versquiet_off(const SVCXPRT *xprt)
1168 1170 {
1169 1171 /* LINTED pointer alignment */
1170 1172 svc_flags(xprt) &= ~SVC_VERSQUIET;
1171 1173 }
1172 1174
1173 1175 void
1174 1176 svc_versquiet(const SVCXPRT *xprt)
1175 1177 {
1176 1178 __svc_versquiet_on(xprt);
1177 1179 }
1178 1180
1179 1181 int
1180 1182 __svc_versquiet_get(const SVCXPRT *xprt)
1181 1183 {
1182 1184 /* LINTED pointer alignment */
1183 1185 return (svc_flags(xprt) & SVC_VERSQUIET);
1184 1186 }
1185 1187
1186 1188 /*
1187 1189 * Authentication error reply
1188 1190 */
1189 1191 void
1190 1192 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1191 1193 {
1192 1194 struct rpc_msg rply;
1193 1195
1194 1196 rply.rm_direction = REPLY;
1195 1197 rply.rm_reply.rp_stat = MSG_DENIED;
1196 1198 rply.rjcted_rply.rj_stat = AUTH_ERROR;
1197 1199 rply.rjcted_rply.rj_why = why;
1198 1200 SVC_REPLY((SVCXPRT *)xprt, &rply);
1199 1201 }
1200 1202
1201 1203 /*
1202 1204 * Auth too weak error reply
1203 1205 */
1204 1206 void
1205 1207 svcerr_weakauth(const SVCXPRT *xprt)
1206 1208 {
1207 1209 svcerr_auth(xprt, AUTH_TOOWEAK);
1208 1210 }
1209 1211
1210 1212 /*
1211 1213 * Program unavailable error reply
1212 1214 */
1213 1215 void
1214 1216 svcerr_noprog(const SVCXPRT *xprt)
1215 1217 {
1216 1218 struct rpc_msg rply;
1217 1219
1218 1220 rply.rm_direction = REPLY;
1219 1221 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1220 1222 rply.acpted_rply.ar_verf = xprt->xp_verf;
1221 1223 rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1222 1224 SVC_REPLY((SVCXPRT *)xprt, &rply);
1223 1225 }
1224 1226
1225 1227 /*
1226 1228 * Program version mismatch error reply
1227 1229 */
1228 1230 void
1229 1231 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1230 - const rpcvers_t high_vers)
1232 + const rpcvers_t high_vers)
1231 1233 {
1232 1234 struct rpc_msg rply;
1233 1235
1234 1236 rply.rm_direction = REPLY;
1235 1237 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1236 1238 rply.acpted_rply.ar_verf = xprt->xp_verf;
1237 1239 rply.acpted_rply.ar_stat = PROG_MISMATCH;
1238 1240 rply.acpted_rply.ar_vers.low = low_vers;
1239 1241 rply.acpted_rply.ar_vers.high = high_vers;
1240 1242 SVC_REPLY((SVCXPRT *)xprt, &rply);
1241 1243 }
1242 1244
1243 1245 /* ******************* SERVER INPUT STUFF ******************* */
1244 1246
1245 1247 /*
1246 1248 * Get server side input from some transport.
1247 1249 *
1248 1250 * Statement of authentication parameters management:
1249 1251 * This function owns and manages all authentication parameters, specifically
1250 1252 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1251 1253 * the "cooked" credentials (rqst->rq_clntcred).
1252 1254 * However, this function does not know the structure of the cooked
1253 1255  * credentials, so it makes the following assumptions:
1254 1256 * a) the structure is contiguous (no pointers), and
1255 1257 * b) the cred structure size does not exceed RQCRED_SIZE bytes.
1256 1258 * In all events, all three parameters are freed upon exit from this routine.
1257 1259 * The storage is trivially management on the call stack in user land, but
1258 1260  * The storage is trivially managed on the call stack in user land, but
1259 1261  * is malloc'd in kernel land.
1260 1262
1261 1263 void
1262 1264 svc_getreq(int rdfds)
1263 1265 {
1264 1266 fd_set readfds;
1265 1267
1266 1268 FD_ZERO(&readfds);
1267 1269 readfds.fds_bits[0] = rdfds;
1268 1270 svc_getreqset(&readfds);
1269 1271 }
1270 1272
1271 1273 void
1272 1274 svc_getreqset(fd_set *readfds)
1273 1275 {
1274 1276 int i;
1275 1277
1276 1278 for (i = 0; i < svc_max_fd; i++) {
1277 1279 /* fd has input waiting */
1278 1280 if (FD_ISSET(i, readfds))
1279 1281 svc_getreq_common(i);
1280 1282 }
1281 1283 }
1282 1284
1283 1285 void
1284 1286 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1285 1287 {
1286 1288 int i;
1287 1289 int fds_found;
1288 1290
1289 1291 for (i = fds_found = 0; fds_found < pollretval; i++) {
1290 1292 struct pollfd *p = &pfdp[i];
1291 1293
1292 1294 if (p->revents) {
1293 1295 /* fd has input waiting */
1294 1296 fds_found++;
1295 1297 /*
1296 1298 * We assume that this function is only called
1297 1299 * via someone select()ing from svc_fdset or
1298 1300 * poll()ing from svc_pollset[]. Thus it's safe
1299 1301 * to handle the POLLNVAL event by simply turning
1300 1302 * the corresponding bit off in svc_fdset. The
1301 1303 * svc_pollset[] array is derived from svc_fdset
1302 1304 * and so will also be updated eventually.
1303 1305 *
1304 1306 * XXX Should we do an xprt_unregister() instead?
1305 1307 */
1306 1308 /* Handle user callback */
1307 1309 if (__is_a_userfd(p->fd) == TRUE) {
1308 1310 (void) rw_rdlock(&svc_fd_lock);
1309 1311 __svc_getreq_user(p);
1310 1312 (void) rw_unlock(&svc_fd_lock);
1311 1313 } else {
1312 1314 if (p->revents & POLLNVAL) {
1313 1315 (void) rw_wrlock(&svc_fd_lock);
1314 1316 remove_pollfd(p->fd); /* XXX */
1315 1317 (void) rw_unlock(&svc_fd_lock);
1316 1318 } else {
1317 1319 svc_getreq_common(p->fd);
1318 1320 }
1319 1321 }
1320 1322 }
1321 1323 }
1322 1324 }
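
svc_getreq_poll() pairs with a caller-owned poll() over the svc_pollfd
array maintained by add_pollfd()/remove_pollfd(); the bare shape is
sketched below (a real loop, like svc_run(), must also serialize against
svc_fd_lock and cope with the array being reallocated):

	#include <errno.h>
	#include <sys/poll.h>
	#include <rpc/rpc.h>

	static void
	poll_loop(void)
	{
		for (;;) {
			int n = poll(svc_pollfd, svc_max_pollfd, -1);

			if (n > 0)
				svc_getreq_poll(svc_pollfd, n);
			else if (n < 0 && errno != EINTR)
				break;
		}
	}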
1323 1325
1324 1326 void
1325 1327 svc_getreq_common(const int fd)
1326 1328 {
1327 1329 SVCXPRT *xprt;
1328 1330 enum xprt_stat stat;
1329 1331 struct rpc_msg *msg;
1330 1332 struct svc_req *r;
1331 1333 char *cred_area;
1332 1334
1333 1335 (void) rw_rdlock(&svc_fd_lock);
1334 1336
1335 1337 /* HANDLE USER CALLBACK */
1336 1338 if (__is_a_userfd(fd) == TRUE) {
1337 1339 struct pollfd virtual_fd;
1338 1340
1339 1341 virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1340 1342 virtual_fd.fd = fd;
1341 1343 __svc_getreq_user(&virtual_fd);
1342 1344 (void) rw_unlock(&svc_fd_lock);
1343 1345 return;
1344 1346 }
1345 1347
1346 1348 /*
1347 1349 * The transport associated with this fd could have been
1348 1350 	 * removed by svc_timeout_nonblock_xprt_and_LRU, for instance.
1349 1351 	 * This can happen if two or more fds get read events and are
1350 1352 	 * passed to svc_getreq_poll/set; the first fd is serviced by
1351 1353 	 * the dispatch routine, which cleans up any dead transports. If
1352 1354 * one of the dead transports removed is the other fd that
1353 1355 * had a read event then svc_getreq_common() will be called with no
1354 1356 * xprt associated with the fd that had the original read event.
1355 1357 */
1356 1358 if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1357 1359 (void) rw_unlock(&svc_fd_lock);
1358 1360 return;
1359 1361 }
1360 1362 (void) rw_unlock(&svc_fd_lock);
1361 1363 /* LINTED pointer alignment */
1362 1364 msg = SVCEXT(xprt)->msg;
1363 1365 /* LINTED pointer alignment */
1364 1366 r = SVCEXT(xprt)->req;
1365 1367 /* LINTED pointer alignment */
1366 1368 cred_area = SVCEXT(xprt)->cred_area;
1367 1369 msg->rm_call.cb_cred.oa_base = cred_area;
1368 1370 msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1369 1371 r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1370 1372
1371 1373 	/* receive msgs from xprt (support batch calls) */
1372 1374 do {
1373 1375 bool_t dispatch;
1374 1376
1375 1377 if (dispatch = SVC_RECV(xprt, msg))
1376 1378 (void) _svc_prog_dispatch(xprt, msg, r);
1377 1379 /*
1378 1380 * Check if the xprt has been disconnected in a recursive call
1379 1381 * in the service dispatch routine. If so, then break
1380 1382 */
1381 1383 (void) rw_rdlock(&svc_fd_lock);
1382 1384 if (xprt != svc_xports[fd]) {
1383 1385 (void) rw_unlock(&svc_fd_lock);
1384 1386 break;
1385 1387 }
1386 1388 (void) rw_unlock(&svc_fd_lock);
1387 1389
1388 1390 /*
1389 1391 * Call cleanup procedure if set.
1390 1392 */
1391 1393 if (__proc_cleanup_cb != NULL && dispatch)
1392 1394 (*__proc_cleanup_cb)(xprt);
1393 1395
1394 1396 if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1395 1397 SVC_DESTROY(xprt);
1396 1398 break;
1397 1399 }
1398 1400 } while (stat == XPRT_MOREREQS);
1399 1401 }
1400 1402
1401 1403 int
1402 1404 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1403 1405 {
1404 1406 struct svc_callout *s;
1405 1407 enum auth_stat why;
1406 1408 int prog_found;
1407 1409 rpcvers_t low_vers;
1408 1410 rpcvers_t high_vers;
1409 1411 void (*disp_fn)();
1410 1412
1411 1413 r->rq_xprt = xprt;
1412 1414 r->rq_prog = msg->rm_call.cb_prog;
1413 1415 r->rq_vers = msg->rm_call.cb_vers;
1414 1416 r->rq_proc = msg->rm_call.cb_proc;
1415 1417 r->rq_cred = msg->rm_call.cb_cred;
1416 1418 /* LINTED pointer alignment */
1417 1419 SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1418 1420 /* LINTED pointer alignment */
1419 1421 SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1420 1422
1421 1423 /* first authenticate the message */
1422 1424 /* Check for null flavor and bypass these calls if possible */
1423 1425
1424 1426 if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1425 1427 r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1426 1428 r->rq_xprt->xp_verf.oa_length = 0;
1427 1429 } else {
1428 1430 bool_t no_dispatch;
1429 1431
1430 1432 if ((why = __gss_authenticate(r, msg,
1431 1433 &no_dispatch)) != AUTH_OK) {
1432 1434 svcerr_auth(xprt, why);
1433 1435 return (0);
1434 1436 }
1435 1437 if (no_dispatch)
1436 1438 return (0);
1437 1439 }
1438 1440 /* match message with a registered service */
1439 1441 prog_found = FALSE;
1440 1442 low_vers = (rpcvers_t)(0 - 1);
1441 1443 high_vers = 0;
1442 1444 (void) rw_rdlock(&svc_lock);
1443 1445 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1444 1446 if (s->sc_prog == r->rq_prog) {
1445 1447 prog_found = TRUE;
1446 1448 if (s->sc_vers == r->rq_vers) {
1447 1449 if ((xprt->xp_netid == NULL) ||
1448 1450 (s->sc_netid == NULL) ||
1449 1451 (strcmp(xprt->xp_netid,
1450 1452 s->sc_netid) == 0)) {
1451 1453 disp_fn = (*s->sc_dispatch);
1452 1454 (void) rw_unlock(&svc_lock);
1453 1455 disp_fn(r, xprt);
1454 1456 return (1);
1455 1457 }
1456 1458 prog_found = FALSE;
1457 1459 }
1458 1460 if (s->sc_vers < low_vers)
1459 1461 low_vers = s->sc_vers;
1460 1462 if (s->sc_vers > high_vers)
1461 1463 high_vers = s->sc_vers;
1462 1464 } /* found correct program */
1463 1465 }
1464 1466 (void) rw_unlock(&svc_lock);
1465 1467
1466 1468 /*
1467 1469 * if we got here, the program or version
1468 1470 * is not served ...
1469 1471 */
1470 1472 if (prog_found) {
1471 1473 /* LINTED pointer alignment */
1472 1474 if (!version_keepquiet(xprt))
1473 1475 svcerr_progvers(xprt, low_vers, high_vers);
1474 1476 } else {
1475 1477 svcerr_noprog(xprt);
1476 1478 }
1477 1479 return (0);
1478 1480 }
1479 1481
1480 1482 /* ******************* SVCXPRT allocation and deallocation ***************** */
1481 1483
1482 1484 /*
1483 1485 * svc_xprt_alloc() - allocate a service transport handle
1484 1486 */
1485 1487 SVCXPRT *
1486 1488 svc_xprt_alloc(void)
1487 1489 {
1488 1490 SVCXPRT *xprt = NULL;
1489 1491 SVCXPRT_EXT *xt = NULL;
1490 1492 SVCXPRT_LIST *xlist = NULL;
1491 1493 struct rpc_msg *msg = NULL;
1492 1494 struct svc_req *req = NULL;
1493 1495 char *cred_area = NULL;
1494 1496
1495 1497 if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1496 1498 goto err_exit;
1497 1499
1498 1500 if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1499 1501 goto err_exit;
1500 1502 xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1501 1503
1502 1504 if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1503 1505 goto err_exit;
1504 1506 xt->my_xlist = xlist;
1505 1507 xlist->xprt = xprt;
1506 1508
1507 1509 if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1508 1510 goto err_exit;
1509 1511 xt->msg = msg;
1510 1512
1511 1513 if ((req = malloc(sizeof (struct svc_req))) == NULL)
1512 1514 goto err_exit;
1513 1515 xt->req = req;
1514 1516
1515 1517 if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1516 1518 goto err_exit;
1517 1519 xt->cred_area = cred_area;
1518 1520
1519 1521 /* LINTED pointer alignment */
1520 1522 (void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1521 1523 return (xprt);
1522 1524
1523 1525 err_exit:
1524 1526 svc_xprt_free(xprt);
1525 1527 return (NULL);
1526 1528 }
1527 1529
1528 1530
1529 1531 /*
1530 1532 * svc_xprt_free() - free a service handle
1531 1533 */
1532 1534 void
1533 1535 svc_xprt_free(SVCXPRT *xprt)
1534 1536 {
1535 1537 /* LINTED pointer alignment */
1536 1538 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
1537 1539 SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist: NULL;
1538 1540 struct rpc_msg *msg = xt ? xt->msg : NULL;
1539 1541 struct svc_req *req = xt ? xt->req : NULL;
1540 1542 char *cred_area = xt ? xt->cred_area : NULL;
1541 1543
1542 1544 if (xprt)
1543 1545 free(xprt);
1544 1546 if (xt)
1545 1547 free(xt);
1546 1548 if (my_xlist)
1547 1549 free(my_xlist);
1548 1550 if (msg)
1549 1551 free(msg);
1550 1552 if (req)
1551 1553 free(req);
1552 1554 if (cred_area)
1553 1555 free(cred_area);
1554 1556 }
1555 1557
1556 1558
1557 1559 /*
1558 1560 * svc_xprt_destroy() - free parent and child xprt list
1559 1561 */
1560 1562 void
1561 1563 svc_xprt_destroy(SVCXPRT *xprt)
1562 1564 {
1563 1565 SVCXPRT_LIST *xlist, *xnext = NULL;
1564 1566 int type;
1565 1567
1566 1568 /* LINTED pointer alignment */
1567 1569 if (SVCEXT(xprt)->parent)
1568 1570 /* LINTED pointer alignment */
1569 1571 xprt = SVCEXT(xprt)->parent;
1570 1572 /* LINTED pointer alignment */
1571 1573 type = svc_type(xprt);
1572 1574 /* LINTED pointer alignment */
1573 1575 for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1574 1576 xnext = xlist->next;
1575 1577 xprt = xlist->xprt;
1576 1578 switch (type) {
1577 1579 case SVC_DGRAM:
1578 1580 svc_dg_xprtfree(xprt);
1579 1581 break;
1580 1582 case SVC_RENDEZVOUS:
1581 1583 svc_vc_xprtfree(xprt);
1582 1584 break;
1583 1585 case SVC_CONNECTION:
1584 1586 svc_fd_xprtfree(xprt);
1585 1587 break;
1586 1588 case SVC_DOOR:
1587 1589 svc_door_xprtfree(xprt);
1588 1590 break;
1589 1591 }
1590 1592 }
1591 1593 }
1592 1594
1593 1595
1594 1596 /*
1595 1597 * svc_copy() - make a copy of parent
1596 1598 */
1597 1599 SVCXPRT *
1598 1600 svc_copy(SVCXPRT *xprt)
1599 1601 {
1600 1602 /* LINTED pointer alignment */
1601 1603 switch (svc_type(xprt)) {
1602 1604 case SVC_DGRAM:
1603 1605 return (svc_dg_xprtcopy(xprt));
1604 1606 case SVC_RENDEZVOUS:
1605 1607 return (svc_vc_xprtcopy(xprt));
1606 1608 case SVC_CONNECTION:
1607 1609 return (svc_fd_xprtcopy(xprt));
1608 1610 }
1609 1611 return (NULL);
1610 1612 }
1611 1613
1612 1614
1613 1615 /*
1614 1616 * _svc_destroy_private() - private SVC_DESTROY interface
1615 1617 */
1616 1618 void
1617 1619 _svc_destroy_private(SVCXPRT *xprt)
1618 1620 {
1619 1621 /* LINTED pointer alignment */
1620 1622 switch (svc_type(xprt)) {
1621 1623 case SVC_DGRAM:
1622 1624 _svc_dg_destroy_private(xprt);
1623 1625 break;
1624 1626 case SVC_RENDEZVOUS:
1625 1627 case SVC_CONNECTION:
1626 1628 _svc_vc_destroy_private(xprt, TRUE);
1627 1629 break;
1628 1630 }
1629 1631 }
1630 1632
1631 1633 /*
1632 1634 * svc_get_local_cred() - fetch local user credentials. This always
1633 1635 * works over doors based transports. For local transports, this
1634 1636 * does not yield correct results unless the __rpc_negotiate_uid()
1635 1637 * call has been invoked to enable this feature.
1636 1638 */
1637 1639 bool_t
1638 1640 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1639 1641 {
1640 1642 /* LINTED pointer alignment */
1641 1643 if (svc_type(xprt) == SVC_DOOR)
1642 1644 return (__svc_get_door_cred(xprt, lcred));
1643 1645 return (__rpc_get_local_cred(xprt, lcred));
1644 1646 }
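
A caller-side sketch of svc_get_local_cred(); the euid field name is
assumed from <rpc/svc.h>, and for non-door local transports
__rpc_negotiate_uid() must have been invoked first, per the comment above:

	static bool_t
	caller_is_root(SVCXPRT *xprt)
	{
		svc_local_cred_t lcred;

		if (!svc_get_local_cred(xprt, &lcred))
			return (FALSE);
		return (lcred.euid == 0);
	}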
1645 1647
1646 1648
1647 1649 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1648 1650
1649 1651 /*
1650 1652  * the dup caching routines below provide a cache of received
1651 1653  * transactions. rpc service routines can use this to detect
1652 1654  * retransmissions and re-send a non-failure response. Uses an
1653 1655  * lru scheme to choose which entries to evict from the cache,
1654 1656 * though only DUP_DONE entries are placed on the lru list.
1655 1657 * the routines were written towards development of a generic
1656 1658 * SVC_DUP() interface, which can be expanded to encompass the
1657 1659 * svc_dg_enablecache() routines as well. the cache is currently
1658 1660 * private to the automounter.
1659 1661 */
1660 1662
1661 1663
1662 1664 /* dupcache header contains xprt specific information */
1663 1665 struct dupcache {
1664 1666 rwlock_t dc_lock;
1665 1667 time_t dc_time;
1666 1668 int dc_buckets;
1667 1669 int dc_maxsz;
1668 1670 int dc_basis;
1669 1671 struct dupreq *dc_mru;
1670 1672 struct dupreq **dc_hashtbl;
1671 1673 };
1672 1674
1673 1675 /*
1674 1676 * private duplicate cache request routines
1675 1677 */
1676 1678 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1677 1679 struct dupcache *, uint32_t, uint32_t);
1678 1680 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1679 1681 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1680 1682 struct dupcache *, uint32_t, uint32_t, time_t);
1681 1683 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1682 1684 struct dupcache *, uint32_t, uint32_t);
1683 1685 #ifdef DUP_DEBUG
1684 1686 static void __svc_dupcache_debug(struct dupcache *);
1685 1687 #endif /* DUP_DEBUG */
1686 1688
1687 1689 /* default parameters for the dupcache */
1688 1690 #define DUPCACHE_BUCKETS 257
1689 1691 #define DUPCACHE_TIME 900
1690 1692 #define DUPCACHE_MAXSZ INT_MAX
1691 1693
1692 1694 /*
1693 1695 * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
1694 1696 * initialize the duprequest cache and assign it to the xprt_cache
1695 1697 * Use default values depending on the cache condition and basis.
1696 1698 * return TRUE on success and FALSE on failure
1697 1699 */
1698 1700 bool_t
1699 1701 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1700 1702 {
1701 1703 static mutex_t initdc_lock = DEFAULTMUTEX;
1702 1704 int i;
1703 1705 struct dupcache *dc;
1704 1706
1705 1707 (void) mutex_lock(&initdc_lock);
1706 1708 if (*xprt_cache != NULL) { /* do only once per xprt */
1707 1709 (void) mutex_unlock(&initdc_lock);
1708 1710 syslog(LOG_ERR,
1709 1711 "__svc_dupcache_init: multiply defined dup cache");
1710 1712 return (FALSE);
1711 1713 }
1712 1714
1713 1715 switch (basis) {
1714 1716 case DUPCACHE_FIXEDTIME:
1715 1717 dc = malloc(sizeof (struct dupcache));
1716 1718 if (dc == NULL) {
1717 1719 (void) mutex_unlock(&initdc_lock);
1718 1720 syslog(LOG_ERR,
1719 1721 "__svc_dupcache_init: memory alloc failed");
1720 1722 return (FALSE);
1721 1723 }
1722 1724 (void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1723 1725 if (condition != NULL)
1724 1726 dc->dc_time = *((time_t *)condition);
1725 1727 else
1726 1728 dc->dc_time = DUPCACHE_TIME;
1727 1729 dc->dc_buckets = DUPCACHE_BUCKETS;
1728 1730 dc->dc_maxsz = DUPCACHE_MAXSZ;
1729 1731 dc->dc_basis = basis;
1730 1732 dc->dc_mru = NULL;
1731 1733 dc->dc_hashtbl = malloc(dc->dc_buckets *
1732 1734 sizeof (struct dupreq *));
1733 1735 if (dc->dc_hashtbl == NULL) {
1734 1736 free(dc);
1735 1737 (void) mutex_unlock(&initdc_lock);
1736 1738 syslog(LOG_ERR,
1737 1739 "__svc_dupcache_init: memory alloc failed");
1738 1740 return (FALSE);
1739 1741 }
1740 1742 for (i = 0; i < DUPCACHE_BUCKETS; i++)
1741 1743 dc->dc_hashtbl[i] = NULL;
1742 1744 *xprt_cache = (char *)dc;
1743 1745 break;
1744 1746 default:
1745 1747 (void) mutex_unlock(&initdc_lock);
1746 1748 syslog(LOG_ERR,
1747 1749 "__svc_dupcache_init: undefined dup cache basis");
1748 1750 return (FALSE);
1749 1751 }
1750 1752
1751 1753 (void) mutex_unlock(&initdc_lock);
1752 1754
1753 1755 return (TRUE);
1754 1756 }
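
Wiring the cache together, roughly (a sketch only: these interfaces are
libnsl-private and, per the comment above, currently used only by the
automounter; DUP_INPROGRESS and the __svc_dupdone() step are inferred from
the surrounding comments rather than shown in this section):

	static char *dupcache = NULL;	/* per-transport cache handle */

	static void
	dispatch_with_dupcache(struct svc_req *rqstp)
	{
		caddr_t resp = NULL;
		uint_t resp_sz = 0;
		time_t window = 900;	/* matches DUPCACHE_TIME */

		if (dupcache == NULL &&
		    !__svc_dupcache_init(&window, DUPCACHE_FIXEDTIME,
		    &dupcache))
			return;

		switch (__svc_dup(rqstp, &resp, &resp_sz, dupcache)) {
		case DUP_NEW:
			/* first sighting: run the procedure, mark it done */
			break;
		case DUP_DONE:
			/* retransmission: replay resp/resp_sz, free(resp) */
			break;
		default:
			/* in progress, dropped, or DUP_ERROR */
			break;
		}
	}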
1755 1757
1756 1758 /*
1757 1759 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1758 1760 * char *xprt_cache)
1759 1761 * searches the request cache. Creates an entry and returns DUP_NEW if
1760 1762 * the request is not found in the cache. If it is found, then it
1761 1763 * returns the state of the request (in progress, drop, or done) and
1762 1764 * also allocates, and passes back results to the user (if any) in
1763 1765 * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1764 1766 */
1765 1767 int
1766 1768 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1767 - char *xprt_cache)
1769 + char *xprt_cache)
1768 1770 {
1769 1771 uint32_t drxid, drhash;
1770 1772 int rc;
1771 1773 struct dupreq *dr = NULL;
1772 1774 time_t timenow = time(NULL);
1773 1775
1774 1776 /* LINTED pointer alignment */
1775 1777 struct dupcache *dc = (struct dupcache *)xprt_cache;
1776 1778
1777 1779 if (dc == NULL) {
1778 1780 syslog(LOG_ERR, "__svc_dup: undefined cache");
1779 1781 return (DUP_ERROR);
1780 1782 }
1781 1783
1782 1784 /* get the xid of the request */
1783 1785 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1784 1786 syslog(LOG_ERR, "__svc_dup: xid error");
1785 1787 return (DUP_ERROR);
1786 1788 }
1787 1789 drhash = drxid % dc->dc_buckets;
1788 1790
1789 1791 if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1790 1792 drhash)) != DUP_NEW)
1791 1793 return (rc);
1792 1794
1793 1795 if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1794 1796 return (DUP_ERROR);
1795 1797
1796 1798 if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1797 1799 == DUP_ERROR)
1798 1800 return (rc);
1799 1801
1800 1802 return (DUP_NEW);
1801 1803 }
1802 1804
1803 1805
1804 1806
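Putting __svc_dup() and __svc_dupdone() together, a hedged sketch (editorial, not part of this diff) of the dispatch pattern a caller might use; do_service() and my_xprt_cache are hypothetical stand-ins.

	/* Sketch only: duplicate-request handling around a service routine. */
	caddr_t resp = NULL;
	uint_t resp_sz = 0;

	switch (__svc_dup(req, &resp, &resp_sz, my_xprt_cache)) {
	case DUP_NEW:
		/* first sighting of this xid: serve it, then record the result */
		do_service(req, &resp, &resp_sz);	/* hypothetical */
		(void) __svc_dupdone(req, resp, resp_sz, DUP_DONE,
		    my_xprt_cache);
		break;
	case DUP_INPROGRESS:
		/* a retransmission arrived while the original is being served */
		break;
	case DUP_DONE:
		/* resend the cached reply from resp/resp_sz, then free(resp) */
		break;
	case DUP_DROP:
		/* the original reply was deliberately dropped; stay silent */
		break;
	case DUP_ERROR:
		/* cache failure; the caller chooses how to degrade */
		break;
	}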
1805 1807 /*
1806 1808 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1807 1809 * uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1808 1810 * uint32_t drhash)
1809 1811 * Checks to see whether an entry already exists in the cache. If it does,
1810 1812 * copy the results back into resp_buf, if appropriate. Return the status
1811 1813 * of the request, or DUP_NEW if the entry is not in the cache.
1812 1814 */
1813 1815 static int
1814 1816 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1815 - struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1817 + struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1816 1818 {
1817 1819 struct dupreq *dr = NULL;
1818 1820
1819 1821 (void) rw_rdlock(&(dc->dc_lock));
1820 1822 dr = dc->dc_hashtbl[drhash];
1821 1823 while (dr != NULL) {
1822 1824 if (dr->dr_xid == drxid &&
1823 1825 dr->dr_proc == req->rq_proc &&
1824 1826 dr->dr_prog == req->rq_prog &&
1825 1827 dr->dr_vers == req->rq_vers &&
1826 1828 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1827 1829 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1828 1830 dr->dr_addr.len) == 0) { /* entry found */
1829 1831 if (dr->dr_hash != drhash) {
1830 1832 /* sanity check */
1831 1833 (void) rw_unlock((&dc->dc_lock));
1832 1834 syslog(LOG_ERR,
1833 1835 "__svc_dupcache_check: hashing error");
1834 1836 return (DUP_ERROR);
1835 1837 }
1836 1838
1837 1839 /*
1838 1840 * return results for requests on the lru list, if
1839 1841 * appropriate. Requests must be DUP_DROP or DUP_DONE
1840 1842 * to have a result. A NULL buffer in the cache
1841 1843 * implies no results were sent during dupdone.
1842 1844 * A NULL buffer in the call implies the caller is
1843 1845 * not interested in results.
1844 1846 */
1845 1847 if (((dr->dr_status == DUP_DONE) ||
1846 1848 (dr->dr_status == DUP_DROP)) &&
1847 1849 resp_buf != NULL &&
1848 1850 dr->dr_resp.buf != NULL) {
1849 1851 *resp_buf = malloc(dr->dr_resp.len);
1850 1852 if (*resp_buf == NULL) {
1851 1853 syslog(LOG_ERR,
1852 1854 "__svc_dupcache_check: malloc failed");
1853 1855 (void) rw_unlock(&(dc->dc_lock));
1854 1856 return (DUP_ERROR);
1855 1857 }
1856 1858 (void) memset(*resp_buf, 0, dr->dr_resp.len);
1857 1859 (void) memcpy(*resp_buf, dr->dr_resp.buf,
1858 1860 dr->dr_resp.len);
1859 1861 *resp_bufsz = dr->dr_resp.len;
1860 1862 } else {
1861 1863 /* no result */
1862 1864 if (resp_buf)
1863 1865 *resp_buf = NULL;
1864 1866 if (resp_bufsz)
1865 1867 *resp_bufsz = 0;
1866 1868 }
1867 1869 (void) rw_unlock(&(dc->dc_lock));
1868 1870 return (dr->dr_status);
1869 1871 }
1870 1872 dr = dr->dr_chain;
1871 1873 }
1872 1874 (void) rw_unlock(&(dc->dc_lock));
1873 1875 return (DUP_NEW);
1874 1876 }
1875 1877
1876 1878 /*
1877 1879 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1878 1880 * Return a victim dupreq entry to the caller, depending on cache policy.
1879 1881 */
1880 1882 static struct dupreq *
1881 1883 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1882 1884 {
1883 1885 struct dupreq *dr = NULL;
1884 1886
1885 1887 switch (dc->dc_basis) {
1886 1888 case DUPCACHE_FIXEDTIME:
1887 1889 /*
1888 1890 * The hash policy is to free up a bit of the hash
1889 1891 * table before allocating a new entry as the victim.
1890 1892 * Freeing up the hash table each time should split
1891 1893 * the cost of keeping the hash table clean among threads.
1892 1894 * Note that only DONE or DROPPED entries are on the lru
1893 1895 * list but we do a sanity check anyway.
1894 1896 */
1895 1897 (void) rw_wrlock(&(dc->dc_lock));
1896 1898 while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1897 1899 ((timenow - dr->dr_time) > dc->dc_time)) {
1898 1900 /* clean and then free the entry */
1899 1901 if (dr->dr_status != DUP_DONE &&
1900 1902 dr->dr_status != DUP_DROP) {
1901 1903 /*
1902 1904 * The LRU list can't contain an
1903 1905 * entry where the status is other than
1904 1906 * DUP_DONE or DUP_DROP.
1905 1907 */
1906 1908 syslog(LOG_ERR,
1907 1909 "__svc_dupcache_victim: bad victim");
1908 1910 #ifdef DUP_DEBUG
1909 1911 /*
1910 1912 * Need to hold the reader/writer lock to
1911 1913 * print the cache info. Since we already
1912 1914 * hold the writer lock, we can safely
1913 1915 * continue calling __svc_dupcache_debug().
1914 1916 */
1915 1917 __svc_dupcache_debug(dc);
1916 1918 #endif /* DUP_DEBUG */
1917 1919 (void) rw_unlock(&(dc->dc_lock));
1918 1920 return (NULL);
1919 1921 }
1920 1922 /* free buffers */
1921 1923 if (dr->dr_resp.buf) {
1922 1924 free(dr->dr_resp.buf);
1923 1925 dr->dr_resp.buf = NULL;
1924 1926 }
1925 1927 if (dr->dr_addr.buf) {
1926 1928 free(dr->dr_addr.buf);
1927 1929 dr->dr_addr.buf = NULL;
1928 1930 }
1929 1931
1930 1932 /* unhash the entry */
1931 1933 if (dr->dr_chain)
1932 1934 dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1933 1935 if (dr->dr_prevchain)
1934 1936 dr->dr_prevchain->dr_chain = dr->dr_chain;
1935 1937 if (dc->dc_hashtbl[dr->dr_hash] == dr)
1936 1938 dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1937 1939
1938 1940 /* modify the lru pointers */
1939 1941 if (dc->dc_mru == dr) {
1940 1942 dc->dc_mru = NULL;
1941 1943 } else {
1942 1944 dc->dc_mru->dr_next = dr->dr_next;
1943 1945 dr->dr_next->dr_prev = dc->dc_mru;
1944 1946 }
1945 1947 free(dr);
1946 1948 dr = NULL;
1947 1949 }
1948 1950 (void) rw_unlock(&(dc->dc_lock));
1949 1951
1950 1952 /*
1951 1953 * Allocate and return new clean entry as victim
1952 1954 */
1953 1955 if ((dr = malloc(sizeof (*dr))) == NULL) {
1954 1956 syslog(LOG_ERR,
1955 1957 "__svc_dupcache_victim: malloc failed");
1956 1958 return (NULL);
1957 1959 }
1958 1960 (void) memset(dr, 0, sizeof (*dr));
1959 1961 return (dr);
1960 1962 default:
1961 1963 syslog(LOG_ERR,
1962 1964 "__svc_dupcache_victim: undefined dup cache basis");
1963 1965 return (NULL);
1964 1966 }
1965 1967 }
1966 1968
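A worked reading of the fixed-time reclaim test above (editorial, not part of this diff): with the default DUPCACHE_TIME of 900 seconds, an entry whose dr_time is 1000 remains cached until timenow exceeds 1900. The sketch below restates the predicate; dup_expired() is a hypothetical name.

	/* Sketch only: an LRU-end entry is reclaimable once its time expires. */
	static int
	dup_expired(const struct dupcache *dc, const struct dupreq *dr,
	    time_t timenow)
	{
		return ((timenow - dr->dr_time) > dc->dc_time);
	}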
1967 1969 /*
1968 1970 * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1969 1971 * struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1970 1972 * build a new duplicate request entry and then insert it into the cache
1971 1973 */
1972 1974 static int
1973 1975 __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
1974 - struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1976 + struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
1975 1977 {
1976 1978 dr->dr_xid = drxid;
1977 1979 dr->dr_prog = req->rq_prog;
1978 1980 dr->dr_vers = req->rq_vers;
1979 1981 dr->dr_proc = req->rq_proc;
1980 1982 dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
1981 1983 dr->dr_addr.len = dr->dr_addr.maxlen;
1982 1984 if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
1983 1985 syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
1984 1986 free(dr);
1985 1987 return (DUP_ERROR);
1986 1988 }
1987 1989 (void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
1988 1990 (void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
1989 1991 dr->dr_addr.len);
1990 1992 dr->dr_resp.buf = NULL;
1991 1993 dr->dr_resp.maxlen = 0;
1992 1994 dr->dr_resp.len = 0;
1993 1995 dr->dr_status = DUP_INPROGRESS;
1994 1996 dr->dr_time = timenow;
1995 1997 dr->dr_hash = drhash; /* needed for efficient victim cleanup */
1996 1998
1997 1999 /* place entry at head of hash table */
1998 2000 (void) rw_wrlock(&(dc->dc_lock));
1999 2001 dr->dr_chain = dc->dc_hashtbl[drhash];
2000 2002 dr->dr_prevchain = NULL;
2001 2003 if (dc->dc_hashtbl[drhash] != NULL)
2002 2004 dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2003 2005 dc->dc_hashtbl[drhash] = dr;
2004 2006 (void) rw_unlock(&(dc->dc_lock));
2005 2007 return (DUP_NEW);
2006 2008 }
2007 2009
2008 2010 /*
2009 2011 * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2010 2012 * int status, char *xprt_cache)
2011 2013 * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2012 2014 * Only DONE and DROP requests can be marked as done. Sets the lru pointers
2013 2015 * to make the entry the most recently used. Returns DUP_ERROR or status.
2014 2016 */
2015 2017 int
2016 2018 __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2017 - int status, char *xprt_cache)
2019 + int status, char *xprt_cache)
2018 2020 {
2019 2021 uint32_t drxid, drhash;
2020 2022 int rc;
2021 2023
2022 2024 /* LINTED pointer alignment */
2023 2025 struct dupcache *dc = (struct dupcache *)xprt_cache;
2024 2026
2025 2027 if (dc == NULL) {
2026 2028 syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2027 2029 return (DUP_ERROR);
2028 2030 }
2029 2031
2030 2032 if (status != DUP_DONE && status != DUP_DROP) {
2031 2033 syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2032 2034 syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
2033 2035 return (DUP_ERROR);
2034 2036 }
2035 2037
2036 2038 /* find the xid of the entry in the cache */
2037 2039 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2038 2040 syslog(LOG_ERR, "__svc_dupdone: xid error");
2039 2041 return (DUP_ERROR);
2040 2042 }
2041 2043 drhash = drxid % dc->dc_buckets;
2042 2044
2043 2045 /* update the status of the entry and result buffers, if required */
2044 2046 if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2045 2047 dc, drxid, drhash)) == DUP_ERROR) {
2046 2048 syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2047 2049 return (DUP_ERROR);
2048 2050 }
2049 2051
2050 2052 return (rc);
2051 2053 }
2052 2054
2053 2055 /*
2054 2056 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2055 2057 * uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2056 2058 * uint32_t drhash)
2057 2059 * Check if the entry exists in the dupcache. If it does, update its status
2058 2060 * and time, and also its buffer, if appropriate. It's possible, but unlikely,
2059 2061 * for DONE requests to not exist in the cache. Return DUP_ERROR or status.
2060 2062 */
2061 2063 static int
2062 2064 __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2063 - int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2065 + int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2064 2066 {
2065 2067 struct dupreq *dr = NULL;
2066 2068 time_t timenow = time(NULL);
2067 2069
2068 2070 (void) rw_wrlock(&(dc->dc_lock));
2069 2071 dr = dc->dc_hashtbl[drhash];
2070 2072 while (dr != NULL) {
2071 2073 if (dr->dr_xid == drxid &&
2072 2074 dr->dr_proc == req->rq_proc &&
2073 2075 dr->dr_prog == req->rq_prog &&
2074 2076 dr->dr_vers == req->rq_vers &&
2075 2077 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2076 2078 memcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2077 2079 dr->dr_addr.len) == 0) { /* entry found */
2078 2080 if (dr->dr_hash != drhash) {
2079 2081 /* sanity check */
2080 2082 (void) rw_unlock(&(dc->dc_lock));
2081 2083 syslog(LOG_ERR,
2082 2084 "__svc_dupdone: hashing error");
2083 2085 return (DUP_ERROR);
2084 2086 }
2085 2087
2086 2088 /* store the results if the buffer is not NULL */
2087 2089 if (resp_buf != NULL) {
2088 2090 if ((dr->dr_resp.buf =
2089 2091 malloc(resp_bufsz)) == NULL) {
2090 2092 (void) rw_unlock(&(dc->dc_lock));
2091 2093 syslog(LOG_ERR,
2092 2094 "__svc_dupdone: malloc failed");
2093 2095 return (DUP_ERROR);
2094 2096 }
2095 2097 (void) memset(dr->dr_resp.buf, 0, resp_bufsz);
2096 2098 (void) memcpy(dr->dr_resp.buf, resp_buf,
2097 2099 (uint_t)resp_bufsz);
2098 2100 dr->dr_resp.len = resp_bufsz;
2099 2101 }
2100 2102
2101 2103 /* update status and done time */
2102 2104 dr->dr_status = status;
2103 2105 dr->dr_time = timenow;
2104 2106
2105 2107 /* move the entry to the mru position */
2106 2108 if (dc->dc_mru == NULL) {
2107 2109 dr->dr_next = dr;
2108 2110 dr->dr_prev = dr;
2109 2111 } else {
2110 2112 dr->dr_next = dc->dc_mru->dr_next;
2111 2113 dc->dc_mru->dr_next->dr_prev = dr;
2112 2114 dr->dr_prev = dc->dc_mru;
2113 2115 dc->dc_mru->dr_next = dr;
2114 2116 }
2115 2117 dc->dc_mru = dr;
2116 2118
2117 2119 (void) rw_unlock(&(dc->dc_lock));
2118 2120 return (status);
2119 2121 }
2120 2122 dr = dr->dr_chain;
2121 2123 }
2122 2124 (void) rw_unlock(&(dc->dc_lock));
2123 2125 syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2124 2126 return (DUP_ERROR);
2125 2127 }
2126 2128
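The MRU splice in __svc_dupcache_update() maintains a single circular, doubly linked ring in which dc_mru points at the newest entry, so dc_mru->dr_next is always the oldest. A self-contained sketch (editorial, not part of this diff) of the same insertion, using a hypothetical node type:

	/* Sketch only: insert node n as the new MRU of a circular ring. */
	struct node {
		struct node *next;
		struct node *prev;
	};

	static void
	mru_insert(struct node **mru, struct node *n)
	{
		if (*mru == NULL) {		/* empty ring: n points at itself */
			n->next = n->prev = n;
		} else {			/* splice n in after the current MRU */
			n->next = (*mru)->next;
			(*mru)->next->prev = n;
			n->prev = *mru;
			(*mru)->next = n;
		}
		*mru = n;			/* n is now the most recently used */
	}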
2127 2129 #ifdef DUP_DEBUG
2128 2130 /*
2129 2131 * __svc_dupcache_debug(struct dupcache *dc)
2130 2132 * print out the hash table and lru list contents
2131 2133 *
2132 2134 * This function requires the caller to hold the reader
2133 2135 * or writer version of the duplicate request cache lock (dc_lock).
2134 2136 */
2135 2137 static void
2136 2138 __svc_dupcache_debug(struct dupcache *dc)
2137 2139 {
2138 2140 struct dupreq *dr = NULL;
2139 2141 int i;
2140 2142 bool_t bval;
2141 2143
2142 2144 fprintf(stderr, " HASHTABLE\n");
2143 2145 for (i = 0; i < dc->dc_buckets; i++) {
2144 2146 bval = FALSE;
2145 2147 dr = dc->dc_hashtbl[i];
2146 2148 while (dr != NULL) {
2147 2149 if (!bval) { /* ensures bucket printed only once */
2148 2150 fprintf(stderr, " bucket : %d\n", i);
2149 2151 bval = TRUE;
2150 2152 }
2151 2153 fprintf(stderr, "\txid: %u status: %d time: %ld",
2152 2154 dr->dr_xid, dr->dr_status, dr->dr_time);
2153 2155 fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2154 2156 (void *)dr, (void *)dr->dr_chain, (void *)dr->dr_prevchain);
2155 2157 dr = dr->dr_chain;
2156 2158 }
2157 2159 }
2158 2160
2159 2161 fprintf(stderr, " LRU\n");
2160 2162 if (dc->dc_mru) {
2161 2163 dr = dc->dc_mru->dr_next; /* lru */
2162 2164 while (dr != dc->dc_mru) {
2163 2165 fprintf(stderr, "\txid: %u status: %d time: %ld",
2164 2166 dr->dr_xid, dr->dr_status, dr->dr_time);
2165 2167 fprintf(stderr, " dr: %p next: %p prev: %p\n",
2166 2168 (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2167 2169 dr = dr->dr_next;
2168 2170 }
2169 2171 fprintf(stderr, "\txid: %u status: %d time: %ld",
2170 2172 dr->dr_xid, dr->dr_status, dr->dr_time);
2171 2173 fprintf(stderr, " dr: %p next: %p prev: %p\n",
2172 2174 (void *)dr, (void *)dr->dr_next, (void *)dr->dr_prev);
2173 2175 }
2174 2176 }
2175 2177 #endif /* DUP_DEBUG */