/* $NetBSD: svc_dg.c,v 1.14 2012/03/20 17:14:50 matt Exp $ */

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part. Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * Mountain View, California 94043
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident "@(#)svc_dg.c 1.17 94/04/24 SMI" */

/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: svc_dg.c,v 1.14 2012/03/20 17:14:50 matt Exp $");
#endif

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#endif

#include "rpc_internal.h"

#define su_data(xprt)    ((struct svc_dg_data *)(xprt->xp_p2))
#define rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, caddr_t);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered
 * (see svc.h, xprt_register).  If recvsize or sendsize are 0, suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
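
/*
 * Example (a sketch): create a datagram transport over an already bound
 * UDP socket and register a service on it.  EXAMPLE_PROG, EXAMPLE_VERS
 * and example_dispatch are placeholder names for the caller's program
 * number, version and dispatch routine, not names defined in this file.
 *
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *	// ... bind(2) the socket to the service address ...
 *	SVCXPRT *xprt = svc_dg_create(sock, 0, 0);   // 0: use system defaults
 *	if (xprt == NULL)
 *		errx(1, "svc_dg_create failed");
 *	if (!svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS, example_dispatch, NULL))
 *		errx(1, "svc_reg failed");
 *	svc_run();
 */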

static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(int fd, u_int sendsize, u_int recvsize)
{
        SVCXPRT *xprt;
        struct svc_dg_data *su = NULL;
        struct __rpc_sockinfo si;
        struct sockaddr_storage ss;
        socklen_t slen;

        if (!__rpc_fd2sockinfo(fd, &si)) {
                warnx(svc_dg_str, svc_dg_err1);
                return (NULL);
        }
        /*
         * Find the receive and the send size
         */
        sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
        recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
        if ((sendsize == 0) || (recvsize == 0)) {
                warnx(svc_dg_str, svc_dg_err2);
                return (NULL);
        }

        xprt = mem_alloc(sizeof (SVCXPRT));
        if (xprt == NULL)
                goto freedata;
        memset(xprt, 0, sizeof (SVCXPRT));

        su = mem_alloc(sizeof (*su));
        if (su == NULL)
                goto freedata;

        su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
        if ((rpc_buffer(xprt) = malloc(su->su_iosz)) == NULL)
                goto freedata;
        _DIAGASSERT(__type_fit(u_int, su->su_iosz));
        xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
            XDR_DECODE);
        su->su_cache = NULL;
        xprt->xp_fd = fd;
        xprt->xp_p2 = (caddr_t)(void *)su;
        xprt->xp_verf.oa_base = su->su_verfbody;
        svc_dg_ops(xprt);
        xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

        slen = sizeof ss;
        if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
                goto freedata;
        xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
        xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
        xprt->xp_ltaddr.len = slen;
        memcpy(xprt->xp_ltaddr.buf, &ss, slen);

        xprt_register(xprt);
        return (xprt);
freedata:
        (void) warnx(svc_dg_str, __no_mem_str);
        if (xprt) {
                if (su)
                        (void) mem_free(su, sizeof (*su));
                (void) mem_free(xprt, sizeof (SVCXPRT));
        }
        return (NULL);
}

static enum xprt_stat
svc_dg_stat(SVCXPRT *xprt)
{
        return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
        struct svc_dg_data *su;
        XDR *xdrs;
        char *reply;
        struct sockaddr_storage ss;
        socklen_t alen;
        size_t replylen;
        ssize_t rlen;

        _DIAGASSERT(xprt != NULL);
        _DIAGASSERT(msg != NULL);

        su = su_data(xprt);
        xdrs = &(su->su_xdrs);

again:
        alen = sizeof (struct sockaddr_storage);
        rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
            (struct sockaddr *)(void *)&ss, &alen);
        if (rlen == -1 && errno == EINTR)
                goto again;
        if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
                return (FALSE);
        if (xprt->xp_rtaddr.len < alen) {
                if (xprt->xp_rtaddr.len != 0)
                        mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
                xprt->xp_rtaddr.buf = mem_alloc(alen);
                xprt->xp_rtaddr.len = alen;
        }
        memcpy(xprt->xp_rtaddr.buf, &ss, alen);

        if (ss.ss_family == AF_INET) {
                xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
                xprt->xp_addrlen = sizeof (struct sockaddr_in);
        }

        xdrs->x_op = XDR_DECODE;
        XDR_SETPOS(xdrs, 0);
        if (! xdr_callmsg(xdrs, msg)) {
                return (FALSE);
        }
        su->su_xid = msg->rm_xid;
        if (su->su_cache != NULL) {
                if (cache_get(xprt, msg, &reply, &replylen)) {
                        (void)sendto(xprt->xp_fd, reply, replylen, 0,
                            (struct sockaddr *)(void *)&ss, alen);
                        return (FALSE);
                }
        }
        return (TRUE);
}

static bool_t
svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
        struct svc_dg_data *su;
        XDR *xdrs;
        bool_t stat = FALSE;
        size_t slen;

        _DIAGASSERT(xprt != NULL);
        _DIAGASSERT(msg != NULL);

        su = su_data(xprt);
        xdrs = &(su->su_xdrs);

        xdrs->x_op = XDR_ENCODE;
        XDR_SETPOS(xdrs, 0);
        msg->rm_xid = su->su_xid;
        if (xdr_replymsg(xdrs, msg)) {
                slen = XDR_GETPOS(xdrs);
                if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
                    (struct sockaddr *)xprt->xp_rtaddr.buf,
                    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
                        stat = TRUE;
                        if (su->su_cache)
                                cache_set(xprt, slen);
                }
        }
        return (stat);
}

static bool_t
svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
        return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
        XDR *xdrs;

        _DIAGASSERT(xprt != NULL);

        xdrs = &(su_data(xprt)->su_xdrs);
        xdrs->x_op = XDR_FREE;
        return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(SVCXPRT *xprt)
{
        struct svc_dg_data *su;

        _DIAGASSERT(xprt != NULL);

        su = su_data(xprt);

        xprt_unregister(xprt);
        if (xprt->xp_fd != -1)
                (void)close(xprt->xp_fd);
        XDR_DESTROY(&(su->su_xdrs));
        (void) mem_free(rpc_buffer(xprt), su->su_iosz);
        (void) mem_free(su, sizeof (*su));
        if (xprt->xp_rtaddr.buf)
                (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
        if (xprt->xp_ltaddr.buf)
                (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
        (void) free(xprt->xp_tp);
        (void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
{
        return (FALSE);
}

static void
svc_dg_ops(SVCXPRT *xprt)
{
        static struct xp_ops ops;
        static struct xp_ops2 ops2;
        extern mutex_t ops_lock;

        _DIAGASSERT(xprt != NULL);

        /* VARIABLES PROTECTED BY ops_lock: ops */

        mutex_lock(&ops_lock);
        if (ops.xp_recv == NULL) {
                ops.xp_recv = svc_dg_recv;
                ops.xp_stat = svc_dg_stat;
                ops.xp_getargs = svc_dg_getargs;
                ops.xp_reply = svc_dg_reply;
                ops.xp_freeargs = svc_dg_freeargs;
                ops.xp_destroy = svc_dg_destroy;
                ops2.xp_control = svc_dg_control;
        }
        xprt->xp_ops = &ops;
        xprt->xp_ops2 = &ops2;
        mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * FIFO cache for the connectionless (cl) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */
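
/*
 * Sketch of the retransmission path (derived from svc_dg_recv() and
 * svc_dg_reply() below):
 *
 *	request arrives -> svc_dg_recv()
 *	    cache_get() hit  -> sendto() the cached reply, drop the request
 *	    cache_get() miss -> dispatch as usual; svc_dg_reply() stores the
 *	                        reply buffer via cache_set() after a
 *	                        successful sendto()
 */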

#define SPARSENESS 4    /* 75% sparse */

#define ALLOC(type, size)       \
        mem_alloc((sizeof (type) * (size)))

#define MEMZERO(addr, type, size)       \
        (void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define FREE(addr, type, size)  \
        mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
        /*
         * Index into cache is xid, proc, vers, prog and address
         */
        u_int32_t cache_xid;
        rpcproc_t cache_proc;
        rpcvers_t cache_vers;
        rpcprog_t cache_prog;
        struct netbuf cache_addr;
        /*
         * The cached reply and length
         */
        char *cache_reply;
        size_t cache_replylen;
        /*
         * Next node on the list, if there is a collision
         */
        cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
        u_int uc_size;          /* size of cache */
        cache_ptr *uc_entries;  /* hash table of entries in cache */
        cache_ptr *uc_fifo;     /* fifo list of entries in cache */
        u_int uc_nextvictim;    /* points to next victim in fifo list */
        rpcprog_t uc_prog;      /* saved program number */
        rpcvers_t uc_vers;      /* saved version number */
        rpcproc_t uc_proc;      /* saved procedure number */
};

/*
 * the hashing function
 */
#define CACHE_LOC(transp, xid)  \
        (xid % (SPARSENESS * ((struct cl_cache *) \
        su_data(transp)->su_cache)->uc_size))
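
/*
 * Worked example (with illustrative numbers): for a cache created as
 * svc_dg_enablecache(xprt, 64), uc_size is 64, so there are
 * SPARSENESS * 64 = 256 hash buckets, and an xid of 0x1234abcd hashes to
 * bucket 0x1234abcd % 256 == 205.
 */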

extern mutex_t dupreq_lock;

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
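
/*
 * Example (a sketch): a server that wants duplicate-request caching on a
 * datagram transport enables it once after creating the transport; the
 * size (here 64) is an arbitrary illustrative value.
 *
 *	if (!svc_dg_enablecache(xprt, 64))
 *		warnx("svc_dg_enablecache failed; replies will not be cached");
 */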

static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(SVCXPRT *transp, u_int size)
{
        struct svc_dg_data *su;
        struct cl_cache *uc;

        _DIAGASSERT(transp != NULL);

        su = su_data(transp);

        mutex_lock(&dupreq_lock);
        if (su->su_cache != NULL) {
                (void) warnx(cache_enable_str, enable_err, " ");
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        uc = ALLOC(struct cl_cache, 1);
        if (uc == NULL) {
                warnx(cache_enable_str, alloc_err, " ");
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        uc->uc_size = size;
        uc->uc_nextvictim = 0;
        uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
        if (uc->uc_entries == NULL) {
                warnx(cache_enable_str, alloc_err, "data");
                FREE(uc, struct cl_cache, 1);
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
        uc->uc_fifo = ALLOC(cache_ptr, size);
        if (uc->uc_fifo == NULL) {
                warnx(cache_enable_str, alloc_err, "fifo");
                FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
                FREE(uc, struct cl_cache, 1);
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        MEMZERO(uc->uc_fifo, cache_ptr, size);
        su->su_cache = (char *)(void *)uc;
        mutex_unlock(&dupreq_lock);
        return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla is needed because the right RPC
 * parameters are not available at svc_dg_reply() time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(SVCXPRT *xprt, size_t replylen)
{
        cache_ptr victim;
        cache_ptr *vicp;
        struct svc_dg_data *su;
        struct cl_cache *uc;
        u_int loc;
        char *newbuf;
#ifdef RPC_CACHE_DEBUG
        struct netconfig *nconf;
        char *uaddr;
#endif

        _DIAGASSERT(xprt != NULL);

        su = su_data(xprt);
        uc = (struct cl_cache *) su->su_cache;

        mutex_lock(&dupreq_lock);
        /*
         * Find space for the new entry, either by
         * reusing an old entry, or by mallocing a new one
         */
        victim = uc->uc_fifo[uc->uc_nextvictim];
        if (victim != NULL) {
                loc = CACHE_LOC(xprt, victim->cache_xid);
                for (vicp = &uc->uc_entries[loc];
                    *vicp != NULL && *vicp != victim;
                    vicp = &(*vicp)->cache_next)
                        ;
                if (*vicp == NULL) {
                        warnx(cache_set_str, cache_set_err1);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
                *vicp = victim->cache_next;     /* remove from cache */
                newbuf = victim->cache_reply;
        } else {
                victim = ALLOC(struct cache_node, 1);
                if (victim == NULL) {
                        warnx(cache_set_str, cache_set_err2);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
                newbuf = mem_alloc(su->su_iosz);
                if (newbuf == NULL) {
                        warnx(cache_set_str, cache_set_err3);
                        FREE(victim, struct cache_node, 1);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
        }

        /*
         * Store it away
         */
#ifdef RPC_CACHE_DEBUG
        if ((nconf = getnetconfigent(xprt->xp_netid)) != NULL) {
                uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
                freenetconfigent(nconf);
                printf(
        "cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
                    su->su_xid, uc->uc_prog, uc->uc_vers,
                    uc->uc_proc, uaddr);
                free(uaddr);
        }
#endif
        victim->cache_replylen = replylen;
        victim->cache_reply = rpc_buffer(xprt);
        rpc_buffer(xprt) = newbuf;
        _DIAGASSERT(__type_fit(u_int, su->su_iosz));
        xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
            XDR_ENCODE);
        victim->cache_xid = su->su_xid;
        victim->cache_proc = uc->uc_proc;
        victim->cache_vers = uc->uc_vers;
        victim->cache_prog = uc->uc_prog;
        victim->cache_addr = xprt->xp_rtaddr;
        victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
        (void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
            (size_t)xprt->xp_rtaddr.len);
        loc = CACHE_LOC(xprt, victim->cache_xid);
        victim->cache_next = uc->uc_entries[loc];
        uc->uc_entries[loc] = victim;
        uc->uc_fifo[uc->uc_nextvictim++] = victim;
        uc->uc_nextvictim %= uc->uc_size;
        mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
{
        u_int loc;
        cache_ptr ent;
        struct svc_dg_data *su;
        struct cl_cache *uc;
#ifdef RPC_CACHE_DEBUG
        struct netconfig *nconf;
        char *uaddr;
#endif

        _DIAGASSERT(xprt != NULL);
        _DIAGASSERT(msg != NULL);
        _DIAGASSERT(replyp != NULL);
        _DIAGASSERT(replylenp != NULL);

        su = su_data(xprt);
        uc = (struct cl_cache *) su->su_cache;

        mutex_lock(&dupreq_lock);
        loc = CACHE_LOC(xprt, su->su_xid);
        for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
                if (ent->cache_xid == su->su_xid &&
                    ent->cache_proc == msg->rm_call.cb_proc &&
                    ent->cache_vers == msg->rm_call.cb_vers &&
                    ent->cache_prog == msg->rm_call.cb_prog &&
                    ent->cache_addr.len == xprt->xp_rtaddr.len &&
                    (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
                        xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
                        if ((nconf = getnetconfigent(xprt->xp_netid)) != NULL) {
                                uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
                                freenetconfigent(nconf);
                                printf(
        "cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
                                    su->su_xid, msg->rm_call.cb_prog,
                                    msg->rm_call.cb_vers,
                                    msg->rm_call.cb_proc, uaddr);
                                free(uaddr);
                        }
#endif
                        *replyp = ent->cache_reply;
                        *replylenp = ent->cache_replylen;
                        mutex_unlock(&dupreq_lock);
                        return (1);
                }
        }
        /*
         * Failed to find entry
         * Remember a few things so we can do a set later
         */
        uc->uc_proc = msg->rm_call.cb_proc;
        uc->uc_vers = msg->rm_call.cb_vers;
        uc->uc_prog = msg->rm_call.cb_prog;
        mutex_unlock(&dupreq_lock);
        return (0);
}