 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * The snmp library prepares the PDUs and communicates with the snmp
 * agent on the SP side via the ds_snmp driver.
 */
#include <sys/types.h>
#include <libnvpair.h>
#include <sys/ds_snmp.h>

#include "libpiclsnmp.h"

#pragma	init(libpiclsnmp_init)		/* need this in .init */
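/*
 * Illustrative usage (added for clarity; not part of the original source):
 * a client typically initializes a handle, registers the OID groups it
 * intends to read, and tears the handle down when done.  The "oids" buffer
 * below stands for a hypothetical packed list of NUL-terminated OID strings.
 */
#if 0
static void
example_client(char *oids, int n_oids)
{
	picl_snmphdl_t	hdl;

	if ((hdl = snmp_init()) == NULL)
		return;

	(void) snmp_register_group(hdl, oids, n_oids, 0);

	/* ... snmp_get_int()/snmp_get_str() calls go here ... */

	snmp_fini(hdl);
}
#endif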
/*
 * Data from the MIB is fetched based on the hints about object
 * groups received from (possibly many threads in) the application.
 * However, the fetched data is kept in a common cache for use across
 * all threads, so even a GETBULK is issued only when absolutely
 * necessary.
 *
 * Note that locking is not fine grained (there's no locking per row)
 * since we don't expect too many MT consumers right away.
 */
static mutex_t	mibcache_lock;
static nvlist_t	**mibcache = NULL;
static uint_t	n_mibcache_rows = 0;

static mutex_t	snmp_reqid_lock;
static int	snmp_reqid = 1;

uint_t	snmp_nsends = 0;
uint_t	snmp_sentbytes = 0;
uint_t	snmp_nrecvs = 0;
uint_t	snmp_rcvdbytes = 0;
#define	SNMP_DEFAULT_PORT	161
#define	SNMP_MAX_RECV_PKTSZ	(64 * 1024)
/*
 * We need a reliably monotonic and stable source of time values to age
 * entries in the mibcache toward expiration.  The code originally used
 * gettimeofday(), but since that is subject to time-of-day changes made by
 * the administrator, the values it returns do not satisfy our needs.
 * Instead, we use gethrtime(), which is immune to time-of-day changes.
 * However, since gethrtime() returns a signed 64-bit value in units of
 * nanoseconds and we are using signed 32-bit timestamps, we always divide
 * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
 * seconds.
 *
 * Note that the scaling factor means that the value of MAX_INCACHE_TIME
 * from snmplib.h should also be in units of 10 seconds.
 */
#define	GET_SCALED_HRTIME()	(int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
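/*
 * Illustrative arithmetic (added for clarity; not in the original source),
 * assuming HRTIME_SCALE is 10 as described above: an hrtime of one hour
 * (3.6e12 ns) scales to 3.6e12 / (10 * 1e9) == 360 ten-second ticks, which
 * comfortably fits in a signed 32-bit timestamp.
 */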
/*
 * The mibcache code originally cached values for 300 seconds after fetching
 * data via SNMP.  Subsequent reads within that 300 second window would come
 * from the cache, which is quite a bit faster than an SNMP query, but the
 * first request that came in more than 300 seconds after the previous SNMP
 * query would trigger a new SNMP query.  This worked well as an
 * optimization for frequent queries, but when data was queried less
 * frequently than every 300 seconds (as proved to be the case at multiple
 * customer sites), the cache didn't help at all.
 *
 * To improve the performance of infrequent queries, code was added to the
 * library to allow a client (i.e. a thread in the picl plugin) to proactively
 * refresh cache entries without waiting for them to expire, thereby ensuring
 * that all volatile entries in the cache at any given time are less than 300
 * seconds old.  Whenever an SNMP query is generated to retrieve volatile data
 * that will be cached, an entry is added to a refresh queue that tracks the
 * parameters of the query and the time at which it was made.  A client can
 * query the age of the oldest item in the refresh queue and, at its
 * discretion, can then force that query to be repeated in a manner that will
 * update the mibcache entry even though it hasn't expired.
 */
typedef struct refreshq_job {
	struct picl_snmphdl	*smd;
	char			*oidstrs;
	int			n_oids;
	int			row;
	int			last_fetch_time;	/* in scaled hrtime */
} refreshq_job_t;
static mutex_t		refreshq_lock;
static refreshq_job_t	*refreshq = NULL;
static uint_t		n_refreshq_slots = 0;	/* # of alloc'ed job slots */
static uint_t		n_refreshq_jobs = 0;	/* # of unprocessed jobs */
static uint_t		refreshq_next_job = 0;	/* oldest unprocessed job */
static uint_t		refreshq_next_slot = 0;	/* next available job slot */
/*
 * Static function declarations
 */
static void	libpiclsnmp_init(void);

static int	lookup_int(char *, int, int *, int);
static int	lookup_str(char *, int, char **, int);
static int	lookup_bitstr(char *, int, uchar_t **, uint_t *, int);

static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
static int	search_oid_in_group(char *, char *, int);

static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
static void	fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
static int	fetch_single_str(struct picl_snmphdl *, char *, int,
    char **, int *);
static int	fetch_single_int(struct picl_snmphdl *, char *, int,
    int *, int *);
static int	fetch_single_bitstr(struct picl_snmphdl *, char *, int,
    uchar_t **, uint_t *, int *);

static int	snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
static int	snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);

static int	mibcache_realloc(int);
static void	mibcache_populate(snmp_pdu_t *, int);
static char	*oid_to_oidstr(oid *, size_t);

static int	refreshq_realloc(int);
static int	refreshq_add_job(struct picl_snmphdl *, char *, int, int);
static void
libpiclsnmp_init(void)
{
	(void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
	if (mibcache_realloc(0) < 0)
		(void) mutex_destroy(&mibcache_lock);

	(void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);
}
picl_snmphdl_t
snmp_init()
{
	struct picl_snmphdl	*smd;
#ifdef USE_SOCKETS
	int	sbuf = (1 << 15);	/* 32K */
	int	rbuf = (1 << 17);	/* 128K */
	char	*snmp_agent_addr;
#endif

	smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
	if (smd == NULL)
		return (NULL);

#ifdef USE_SOCKETS
	if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL)
		return (NULL);

	if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0)
		return (NULL);

	(void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
	(void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));

	memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
	smd->agent_addr.sin_family = AF_INET;
	smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
	smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
#else
	smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
	if (smd->fd < 0) {
		free(smd);
		return (NULL);
	}
#endif

	return ((picl_snmphdl_t)smd);
}
void
snmp_fini(picl_snmphdl_t hdl)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;

	if (smd) {
		if (smd->fd >= 0) {
			(void) close(smd->fd);
		}
		free((void *) smd);
	}
}
int
snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	nvlist_t	*nvl;
	int		i;

	(void) mutex_lock(&mibcache_lock);

	for (i = 0; i < n_mibcache_rows; i++) {
		if ((nvl = mibcache[i]) != NULL)
			nvlist_free(nvl);
	}

	n_mibcache_rows = 0;
	if (mibcache) {
		free((void *) mibcache);
		mibcache = NULL;
	}

	(void) mutex_unlock(&mibcache_lock);

	if (clr_linkreset) {
		if (smd == NULL || smd->fd < 0)
			return (-1);
		else
			return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
	}

	return (0);
}
int
snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*oidg;
	oidgroup_t	*curr, *prev;
	char		*p;
	int		i, sz;

	/*
	 * Allocate a new oidgroup_t
	 */
	oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
	if (oidg == NULL)
		return (-1);

	/*
	 * Determine how much space is required to register this group
	 */
	sz = 0;
	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		sz += strlen(p) + 1;
		p = oidstrs + sz;
	}

	/*
	 * Create this oid group
	 */
	if ((p = (char *)malloc(sz)) == NULL) {
		free((void *) oidg);
		return (-1);
	}

	(void) memcpy(p, oidstrs, sz);

	oidg->next = NULL;
	oidg->oidstrs = p;
	oidg->n_oids = n_oids;
	oidg->is_volatile = is_vol;

	/*
	 * Link it to the tail of the list of oid groups
	 */
	for (prev = NULL, curr = smd->group; curr; curr = curr->next)
		prev = curr;

	if (prev == NULL)
		smd->group = oidg;
	else
		prev->next = oidg;

	return (0);
}
/*
 * snmp_get_int() takes in an OID and returns the integer value
 * of the object referenced in the passed arg. It returns 0 on
 * success and -1 on failure.
 */
int
snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || val == NULL)
		return (-1);

	/*
	 * If this item should not be cached, fetch it directly from
	 * the agent using fetch_single_xxx()
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_int(smd, prefix, row, val, &err);
		if (snmp_syserr)
			*snmp_syserr = err;
		return (ret);
	}

	/*
	 * is it in the cache?
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
		return (0);

	/*
	 * fetch it from the agent and populate the cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * look it up again and return it
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
		return (-1);

	return (0);
}
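/*
 * Illustrative call (added for clarity; not part of the original source):
 * reading a single integer property.  The OID string and row number are
 * hypothetical placeholders.
 */
#if 0
static int
example_get_int(picl_snmphdl_t hdl)
{
	int	val, syserr = 0;

	if (snmp_get_int(hdl, "1.3.6.1.2.1.1.3", 0, &val, &syserr) < 0)
		return (-1);

	return (val);
}
#endif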
/*
 * snmp_get_str() takes in an OID and returns the string value
 * of the object referenced in the passed arg. Memory for the string
 * is allocated within snmp_get_str() and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	char	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || strp == NULL)
		return (-1);

	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_str(smd, prefix, row, strp, &err);
		if (snmp_syserr)
			*snmp_syserr = err;
		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
		if ((*strp = strdup(val)) == NULL)
			return (-1);
		else
			return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Look it up again and return it
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
		return (-1);

	if ((*strp = strdup(val)) == NULL)
		return (-1);

	return (0);
}
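/*
 * Illustrative call (added for clarity; not part of the original source):
 * the string returned by snmp_get_str() is allocated by the library and
 * must be freed by the caller.  The OID string is a hypothetical
 * placeholder.
 */
#if 0
static void
example_get_str(picl_snmphdl_t hdl)
{
	char	*str = NULL;
	int	syserr = 0;

	if (snmp_get_str(hdl, "1.3.6.1.2.1.1.1", 0, &str, &syserr) == 0 &&
	    str != NULL) {
		/* ... use str ... */
		free(str);
	}
}
#endif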
/*
 * snmp_get_bitstr() takes in an OID and returns the bit string value
 * of the object referenced in the passed args. Memory for the bitstring
 * is allocated within the function and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
    uint_t *nbytes, int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	uchar_t	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
		return (-1);

	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
		    nbytes, &err);
		if (snmp_syserr)
			*snmp_syserr = err;
		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
		if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
			return (-1);
		(void) memcpy(*bitstrp, (const void *)val, *nbytes);
		return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Look it up again and return it
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
		return (-1);

	if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
		return (-1);
	(void) memcpy(*bitstrp, (const void *)val, *nbytes);

	return (0);
}
/*
 * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
 * only just. In particular, this is only expected to return the next
 * valid row number for the same object, not its value. Since we don't
 * have any other means, we use this to determine the number of rows
 * in the table (and the valid ones). This function returns 0 on success
 * and -1 on failure.
 */
int
snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
    int *snmp_syserr)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;
	char	*nxt_oidstr;
	int	err = 0;

	if (smd == NULL || prefix == NULL || nextrow == NULL) {
		if (snmp_syserr)
			*snmp_syserr = EINVAL;
		return (-1);
	}

	/*
	 * The get_nextrow results should *never* go into any cache,
	 * since these relationships are dynamically discovered each time.
	 */
	if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
		if (snmp_syserr)
			*snmp_syserr = err;
		return (-1);
	}

	/*
	 * We are not concerned about the "value" of the lexicographically
	 * next object; we only care about the name of that object and
	 * its row number (and whether such an object exists or not).
	 */
	vp = reply_pdu->vars;

	/*
	 * This indicates that we're at the end of the MIB view.
	 */
	if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
	    vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOSPC;
		return (-1);
	}

	/*
	 * need to be able to convert the OID
	 */
	if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOMEM;
		return (-1);
	}

	/*
	 * We're on to the next table.
	 */
	if (strcmp(nxt_oidstr, prefix) != 0) {
		free(nxt_oidstr);
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOENT;
		return (-1);
	}

	/*
	 * Ok, so we've got an oid that's simply the next valid row of the
	 * passed-in object, return this row number.
	 */
	*nextrow = (vp->name)[vp->name_len - 1];

	free(nxt_oidstr);
	snmp_free_pdu(reply_pdu);

	return (0);
}
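/*
 * Illustrative use (added for clarity; not part of the original source):
 * counting the valid rows of a table object by repeated calls to
 * snmp_get_nextrow(), assuming rows are discovered starting from row 0.
 */
#if 0
static int
example_count_rows(picl_snmphdl_t hdl, char *table_oid)
{
	int	row = 0, nextrow, syserr = 0, count = 0;

	while (snmp_get_nextrow(hdl, table_oid, row, &nextrow, &syserr) == 0) {
		count++;
		row = nextrow;
	}

	return (count);
}
#endif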
/*
 * Request ids for snmp messages to the agent are sequenced here.
 */
int
snmp_get_reqid(void)
{
	int	ret;

	(void) mutex_lock(&snmp_reqid_lock);

	ret = snmp_reqid++;

	(void) mutex_unlock(&snmp_reqid_lock);

	return (ret);
}
static int
lookup_int(char *prefix, int row, int *valp, int is_vol)
{
	int32_t	*val_arr;
	uint_t	nelem;
	int	now, elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for an integer-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_int32_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || val_arr[1] < 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - val_arr[1];
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = (int)val_arr[0];
	} else {
		if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}
static int
lookup_str(char *prefix, int row, char **valp, int is_vol)
{
	char	**val_arr;
	uint_t	nelem;
	int	now, elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for a string-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_string_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || atoi(val_arr[1]) <= 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - atoi(val_arr[1]);
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = val_arr[0];
	} else {
		if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}
static int
lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
{
	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * We don't support volatile bit string values yet. The nvlist
	 * functions don't support bitstring arrays like they do charstring
	 * arrays, so we would need to do things in a convoluted way,
	 * probably by attaching the timestamp as part of the byte array
	 * itself. However, the need for volatile bitstrings isn't yet
	 * compelling enough to justify the effort.
	 */
	if (is_vol) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}
static int
search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
{
	char	*p;
	int	i;

	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		if (strcmp(p, prefix) == 0)
			return (0);

		p += strlen(p) + 1;
	}

	return (-1);
}
static oidgroup_t *
locate_oid_group(struct picl_snmphdl *smd, char *prefix)
{
	oidgroup_t	*grp;

	if (smd == NULL)
		return (NULL);

	if (smd->group == NULL)
		return (NULL);

	for (grp = smd->group; grp; grp = grp->next) {
		if (search_oid_in_group(prefix, grp->oidstrs,
		    grp->n_oids) == 0) {
			return (grp);
		}
	}

	return (NULL);
}
static int
fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
    int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	/*
	 * Note that we don't make any distinction between unsigned int
	 * value and signed int value at this point, since we provide
	 * only snmp_get_int() at the higher level. While it is possible
	 * to provide an entirely separate interface such as snmp_get_uint(),
	 * that's quite unnecessary, because we don't do any interpretation
	 * of the received value. Besides, the sizes of int and uint are
	 * the same and the sizes of all pointers are the same (so val.iptr
	 * would be the same as val.uiptr in pdu_varlist_t). If/when we
	 * violate any of these assumptions, it will be time to add
	 * snmp_get_uint().
	 */
	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.iptr == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*ival = *(vp->val.iptr);

	snmp_free_pdu(reply_pdu);

	return (0);
}
static int
fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
    int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*valp = strdup((const char *)(vp->val.str));

	snmp_free_pdu(reply_pdu);

	return (0);
}
static int
fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
    uchar_t **valp, uint_t *nelem, int *snmp_syserr)
{
	snmp_pdu_t	*reply_pdu;
	pdu_varlist_t	*vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*nelem = vp->val_len;
	(void) memcpy(*valp, (const void *)(vp->val.str),
	    (size_t)(vp->val_len));

	snmp_free_pdu(reply_pdu);

	return (0);
}
static snmp_pdu_t *
fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	LOGGET(TAG_CMD_REQUEST, prefix, row);

	if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
		return (NULL);

	LOGPDU(TAG_REQUEST_PDU, pdu);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}
static void
fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
    int row, int is_vol, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;
	int		max_reps;

	LOGBULK(TAG_CMD_REQUEST, n_oids, oidstrs, row);

	/*
	 * If we're fetching volatile properties using BULKGET, don't
	 * venture to get multiple rows (passing max_reps=0 will make
	 * snmp_create_pdu() fetch SNMP_DEF_MAX_REPETITIONS rows)
	 */
	max_reps = is_vol ? 1 : 0;

	pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
	if (pdu == NULL)
		return;

	LOGPDU(TAG_REQUEST_PDU, pdu);

	/*
	 * Make an ASN.1 encoded packet from the PDU information
	 */
	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	/*
	 * Send the request packet to the agent
	 */
	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Receive response from the agent into the reply packet buffer
	 * in the request PDU
	 */
	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	/*
	 * Parse the reply, validate the response and create a
	 * reply-PDU out of the information. Populate the mibcache
	 * with the received values.
	 */
	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);
	if (reply_pdu) {
		LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

		if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
			if (is_vol) {
				/* Add a job to the cache refresh work queue */
				(void) refreshq_add_job(smd, oidstrs, n_oids,
				    row);
			}

			mibcache_populate(reply_pdu, is_vol);
		}

		snmp_free_pdu(reply_pdu);
	}

	snmp_free_pdu(pdu);
}
static snmp_pdu_t *
fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	LOGNEXT(TAG_CMD_REQUEST, prefix, row);

	pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
	if (pdu == NULL)
		return (NULL);

	LOGPDU(TAG_REQUEST_PDU, pdu);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	LOGPDU(TAG_RESPONSE_PDU, reply_pdu);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}
static int
snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
#ifdef USE_SOCKETS
	int	ret;
#endif

	if (pdu == NULL || pdu->req_pkt == NULL)
		return (-1);

#ifdef USE_SOCKETS
	LOGIO(TAG_SENDTO, smd->fd, pdu->req_pkt, pdu->req_pktsz);

	ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
	    (struct sockaddr *)&smd->agent_addr,
	    sizeof (struct sockaddr));
	if (ret < 0 && errno != EINTR) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#else
	LOGIO(TAG_WRITE, smd->fd, pdu->req_pkt, pdu->req_pktsz);

	if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

	snmp_nsends++;
	snmp_sentbytes += pdu->req_pktsz;

	return (0);
}
static int
snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
	struct dssnmp_info	snmp_info;
	size_t	pktsz;
	uchar_t	*pkt;
#ifdef USE_SOCKETS
	struct sockaddr_in	from;
	socklen_t	fromlen;
	ssize_t	msgsz;
#endif

	if (smd->fd < 0 || pdu == NULL)
		return (-1);

#ifdef USE_SOCKETS
	if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
		return (-1);

	fromlen = sizeof (struct sockaddr_in);

	LOGIO(TAG_RECVFROM, smd->fd, pkt, SNMP_MAX_RECV_PKTSZ);

	msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
	    (struct sockaddr *)&from, &fromlen);
	if (msgsz < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
		free(pkt);
		return (-1);
	}

	pktsz = (size_t)msgsz;
#else
	LOGIO(TAG_IOCTL, smd->fd, DSSNMP_GETINFO, &snmp_info);

	/*
	 * The ioctl will block until we have snmp data available
	 */
	if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}

	pktsz = snmp_info.size;
	if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
		return (-1);

	LOGIO(TAG_READ, smd->fd, pkt, pktsz);

	if (read(smd->fd, pkt, pktsz) < 0) {
		free(pkt);
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

	pdu->reply_pkt = pkt;
	pdu->reply_pktsz = pktsz;

	snmp_nrecvs++;
	snmp_rcvdbytes += pktsz;

	return (0);
}
static int
mibcache_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	nvlist_t	**p;

	if (hint < 0)
		return (-1);

	(void) mutex_lock(&mibcache_lock);

	if (hint < n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (0);
	}

	count = ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;

	p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
	if (p == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache) {
		(void) memcpy((void *) p, (void *) mibcache,
		    n_mibcache_rows * sizeof (nvlist_t *));
		free((void *) mibcache);
	}

	mibcache = p;
	n_mibcache_rows = count;

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}
/*
 * Scan each variable in the returned PDU's bindings and populate
 * the cache appropriately
 */
static void
mibcache_populate(snmp_pdu_t *pdu, int is_vol)
{
	pdu_varlist_t	*vp;
	int		row, ret;
	char		*oidstr;
	int		tod;	/* in scaled hrtime units */
	char		tod_str[MAX_INT_LEN];
	int32_t		ival_arr[2];
	char		*sval_arr[2];

	/*
	 * If we're populating volatile properties, we also store a
	 * timestamp with each property value. When we look a value up, we
	 * check the current time against this timestamp to determine if
	 * we need to refetch the value or not (refetch it if it has been
	 * in the cache for too long).
	 */
	if (is_vol) {
		tod = GET_SCALED_HRTIME();

		ival_arr[1] = (int32_t)tod;

		(void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);
		sval_arr[1] = (char *)tod_str;
	}

	for (vp = pdu->vars; vp; vp = vp->nextvar) {
		if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
		    vp->type != ASN_BIT_STR) {
			continue;
		}

		if (vp->name == NULL || vp->val.str == NULL)
			continue;

		row = (vp->name)[vp->name_len - 1];

		(void) mutex_lock(&mibcache_lock);

		if (row >= n_mibcache_rows) {
			(void) mutex_unlock(&mibcache_lock);
			if (mibcache_realloc(row) < 0)
				continue;
			(void) mutex_lock(&mibcache_lock);
		}

		ret = 0;
		if (mibcache[row] == NULL)
			ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);

		(void) mutex_unlock(&mibcache_lock);

		if (ret != 0)
			continue;

		/*
		 * Convert the standard OID form into an oid string that
		 * we can use as the key to lookup. Since we only search
		 * by the prefix (mibcache is really an array of nvlist_t
		 * pointers), ignore the leaf subid.
		 */
		oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
		if (oidstr == NULL)
			continue;

		(void) mutex_lock(&mibcache_lock);

		if (vp->type == ASN_INTEGER) {
			if (is_vol) {
				ival_arr[0] = *(vp->val.iptr);
				(void) nvlist_add_int32_array(mibcache[row],
				    oidstr, ival_arr, 2);
			} else {
				(void) nvlist_add_int32(mibcache[row],
				    oidstr, *(vp->val.iptr));
			}
		} else if (vp->type == ASN_OCTET_STR) {
			if (is_vol) {
				sval_arr[0] = (char *)vp->val.str;
				(void) nvlist_add_string_array(mibcache[row],
				    oidstr, sval_arr, 2);
			} else {
				(void) nvlist_add_string(mibcache[row],
				    oidstr, (const char *)(vp->val.str));
			}
		} else if (vp->type == ASN_BIT_STR) {
			/*
			 * We don't yet support bit string objects that are
			 * volatile values.
			 */
			if (!is_vol) {
				(void) nvlist_add_byte_array(mibcache[row],
				    oidstr, (uchar_t *)(vp->val.str),
				    (uint_t)vp->val_len);
			}
		}
		(void) mutex_unlock(&mibcache_lock);

		free(oidstr);
	}
}
static char *
oid_to_oidstr(oid *objid, size_t n_subids)
{
	char	*oidstr;
	char	subid_str[MAX_INT_LEN];
	int	i, isize;
	size_t	oidstr_sz;

	/*
	 * ugly, but for now this will have to do.
	 */
	oidstr_sz = sizeof (subid_str) * n_subids;
	oidstr = calloc(1, oidstr_sz);
	if (oidstr == NULL)
		return (NULL);

	for (i = 0; i < n_subids; i++) {
		(void) memset(subid_str, 0, sizeof (subid_str));
		isize = snprintf(subid_str, sizeof (subid_str), "%d",
		    objid[i]);
		if (isize >= sizeof (subid_str)) {
			free(oidstr);
			return (NULL);
		}

		(void) strlcat(oidstr, subid_str, oidstr_sz);
		if (i < (n_subids - 1))
			(void) strlcat(oidstr, ".", oidstr_sz);
	}

	return (oidstr);
}
/*
 * Expand the refreshq to hold more cache refresh jobs.  Caller must already
 * hold the refreshq_lock mutex.  Every expansion of the refreshq will add
 * REFRESHQ_BLK_SZ job slots, rather than expanding by one slot every time
 * more space is needed.
 */
static int
refreshq_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	refreshq_job_t	*p;

	if (hint < 0)
		return (-1);

	if (hint < n_refreshq_slots) {
		return (0);
	}

	/* Round count up to the next multiple of (1 << REFRESHQ_BLK_SHIFT) */
	count = ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;

	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
	if (p == NULL)
		return (-1);

	if (refreshq) {
		if (n_refreshq_jobs == 0) {
			/* Simple case, nothing to copy */
			refreshq_next_job = 0;
			refreshq_next_slot = 0;
		} else if (refreshq_next_slot > refreshq_next_job) {
			/* Simple case, single copy preserves everything */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    n_refreshq_jobs * sizeof (refreshq_job_t));
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		} else {
			/*
			 * Complex case.  The jobs in the refresh queue wrap
			 * around the end of the array in which they are
			 * stored.  To preserve chronological order in the
			 * newly allocated array, we need to copy the jobs at
			 * the end of the old array to the beginning of the
			 * new one and place the jobs from the beginning of
			 * the old array after them.
			 */
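			/*
			 * Worked example (added for clarity; not in the
			 * original): with n_refreshq_slots = 8,
			 * refreshq_next_job = 6 and n_refreshq_jobs = 5, the
			 * live jobs occupy slots 6, 7, 0, 1, 2.  Then
			 * tail_jobs = 8 - 6 = 2 (old slots 6..7 become new
			 * slots 0..1) and head_jobs = 5 - 2 = 3 (old slots
			 * 0..2 become new slots 2..4).
			 */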
			uint_t	tail_jobs, head_jobs;

			tail_jobs = n_refreshq_slots - refreshq_next_job;
			head_jobs = n_refreshq_jobs - tail_jobs;

			/* Copy the jobs from the end of the old array */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    tail_jobs * sizeof (refreshq_job_t));

			/* Copy the jobs from the beginning of the old array */
			(void) memcpy((void *) &(p[tail_jobs]),
			    (void *) &(refreshq[0]),
			    head_jobs * sizeof (refreshq_job_t));

			/* update the job and slot indices to match */
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		}
		free((void *) refreshq);
	} else {
		/* First initialization */
		refreshq_next_job = 0;
		refreshq_next_slot = 0;
		n_refreshq_jobs = 0;
	}

	refreshq = p;
	n_refreshq_slots = count;

	return (0);
}
/*
 * Add a new job to the refreshq.  If there aren't any open slots, attempt to
 * expand the queue first.  Return -1 if unable to add the job to the work
 * queue, or 0 if the job was added OR if an existing job with the same
 * parameters is already pending.
 */
static int
refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
{
	uint_t	i;
	uint_t	job;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * Can't do anything without a queue.  Either the client never
	 * initialized the refresh queue, or the initial memory allocation
	 * failed.
	 */
	if (refreshq == NULL) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	/*
	 * If there is already a job pending with the same parameters as the
	 * job we have been asked to add, we apparently let an entry expire
	 * and it is now being reloaded.  Rather than add another job for the
	 * same entry, we skip adding the new job and let the existing job
	 * address it.
	 */
	for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
	    job = (job + 1) % n_refreshq_slots) {
		if ((refreshq[job].row == row) &&
		    (refreshq[job].n_oids == n_oids) &&
		    (refreshq[job].oidstrs == oidstrs)) {
			(void) mutex_unlock(&refreshq_lock);
			return (0);
		}
	}

	/*
	 * If the queue is full, we need to expand it
	 */
	if (n_refreshq_jobs == n_refreshq_slots) {
		if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
			/*
			 * Can't expand the job queue, so we drop this job on
			 * the floor.  No data is lost... we just allow some
			 * data in the mibcache to expire.
			 */
			(void) mutex_unlock(&refreshq_lock);
			return (-1);
		}
	}

	/*
	 * There is room in the queue, so add the new job.  We are actually
	 * taking a timestamp for this job that is slightly earlier than when
	 * the mibcache entry will be updated, but since we're trying to
	 * update the mibcache entry before it expires anyway, the earlier
	 * timestamp here is acceptable.
	 */
	refreshq[refreshq_next_slot].smd = smd;
	refreshq[refreshq_next_slot].oidstrs = oidstrs;
	refreshq[refreshq_next_slot].n_oids = n_oids;
	refreshq[refreshq_next_slot].row = row;
	refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();

	/*
	 * Update queue management variables
	 */
	n_refreshq_jobs += 1;
	refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;

	(void) mutex_unlock(&refreshq_lock);

	return (0);
}
/*
 * Almost all of the refresh code remains dormant unless specifically
 * initialized by a client (the exception being that fetch_bulk() will still
 * call refreshq_add_job(), but the latter will return without doing
 * anything).
 */
int
snmp_refresh_init(void)
{
	int	ret;

	(void) mutex_lock(&refreshq_lock);

	ret = refreshq_realloc(0);

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}
/*
 * If the client is going away, we don't want to keep doing refresh work, so
 * clean everything up.
 */
void
snmp_refresh_fini(void)
{
	(void) mutex_lock(&refreshq_lock);

	n_refreshq_jobs = 0;
	n_refreshq_slots = 0;
	refreshq_next_job = 0;
	refreshq_next_slot = 0;
	free(refreshq);
	refreshq = NULL;

	(void) mutex_unlock(&refreshq_lock);
}
/*
 * Return the number of seconds remaining before the mibcache entry associated
 * with the next job in the queue will expire.  Note that this requires
 * reversing the scaling normally done on hrtime values.  (The need for
 * scaling is purely internal, and should be hidden from clients.)  If there
 * are no jobs in the queue, return -1.  If the next job has already expired,
 * return 0.
 */
int
snmp_refresh_get_next_expiration(void)
{
	int	ret;
	int	elapsed;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		ret = -1;
	} else {
		elapsed = GET_SCALED_HRTIME() -
		    refreshq[refreshq_next_job].last_fetch_time;

		if (elapsed >= MAX_INCACHE_TIME) {
			ret = 0;
		} else {
			ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
		}
	}

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}
/*
 * Given the number of seconds the client wants to spend on each cycle of
 * processing jobs and then sleeping, return a suggestion for the number of
 * jobs the client should process, calculated by dividing the client's cycle
 * duration by MAX_INCACHE_TIME and multiplying the result by the total number
 * of jobs in the queue.  (Note that the actual implementation of that
 * calculation is done in a different order to avoid losing fractional values
 * during integer arithmetic.)
 */
int
snmp_refresh_get_cycle_hint(int secs)
{
	int	jobs;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * First, we need to scale the client's cycle time to get it into the
	 * same units we use internally (i.e. tens of seconds).  We round up,
	 * as it makes more sense for the client to process extra jobs than
	 * insufficient jobs.  If the client's desired cycle time is greater
	 * than MAX_INCACHE_TIME, we just return the current total number of
	 * jobs.
	 */
	secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;

	jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
	if (jobs > n_refreshq_jobs) {
		jobs = n_refreshq_jobs;
	}

	(void) mutex_unlock(&refreshq_lock);

	return (jobs);
}
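/*
 * Worked example (added for clarity; not part of the original source),
 * assuming HRTIME_SCALE = 10 and MAX_INCACHE_TIME = 30 (i.e. 300 seconds)
 * as described earlier: a requested cycle of 60 seconds scales to
 * (60 + 9) / 10 = 6 ticks, so with 100 queued jobs the hint is
 * (100 * 6) / 30 = 20 jobs per cycle.  Computing (6 / 30) first would
 * truncate to zero, hence the order of operations above.
 */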
/*
 * Process the next job on the refresh queue by invoking fetch_bulk() with the
 * recorded parameters.  Return -1 if no job was processed (e.g. because there
 * aren't any available), or 0 if a job was processed.  We don't actually care
 * if fetch_bulk() fails, since we're just working on cache entry refreshing
 * and the worst case result of failing here is a longer delay getting that
 * data the next time it is requested.
 */
int
snmp_refresh_process_job(void)
{
	struct picl_snmphdl	*smd;
	char			*oidstrs;
	int			n_oids;
	int			row;
	int			err = 0;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	smd = refreshq[refreshq_next_job].smd;
	oidstrs = refreshq[refreshq_next_job].oidstrs;
	n_oids = refreshq[refreshq_next_job].n_oids;
	row = refreshq[refreshq_next_job].row;

	refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
	n_refreshq_jobs--;

	(void) mutex_unlock(&refreshq_lock);

	/*
	 * fetch_bulk() is going to come right back into the refresh code to
	 * add a new job for the entry we just loaded, which means we have to
	 * make the call without holding the refreshq_lock mutex.
	 */
	fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);

	return (0);
}