/*-
 * Copyright (c) 2004 Apple Computer, Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#13 $
 */
33 #include <sys/types.h>
35 #include <config/config.h>
36 #ifdef HAVE_FULL_QUEUE_H
37 #include <sys/queue.h>
38 #else /* !HAVE_FULL_QUEUE_H */
39 #include <compat/queue.h>
40 #endif /* !HAVE_FULL_QUEUE_H */
42 #include <bsm/libbsm.h>
/* Serializes access to ev_cache and 'firsttime'. */
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
/* Non-zero until the event cache has been initialized once — presumably a
 * one-shot load guard; the guarding check is not visible here (TODO confirm). */
static int	firsttime = 1;
/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
/*
 * One cached audit event entry.  The embedded au_event_ent's name and
 * description pointers are aimed at the ev_name/ev_desc buffers below
 * (see audit_event_map_alloc()), so each entry owns its string storage.
 */
struct audit_event_map {
	char			 ev_name[AU_EVENT_NAME_MAX];	/* Backing store for ev.ae_name. */
	char			 ev_desc[AU_EVENT_DESC_MAX];	/* Backing store for ev.ae_desc. */
	struct au_event_ent	 ev;				/* The cached event record. */
	LIST_ENTRY(audit_event_map)	 ev_list;		/* Linkage in ev_cache. */
};
/* Head of the cached event list; filled in by load_event_table(). */
static LIST_HEAD(, audit_event_map)	ev_cache;
68 static struct audit_event_map
*
69 audit_event_map_alloc(void)
71 struct audit_event_map
*aemp
;
73 aemp
= malloc(sizeof(*aemp
));
76 bzero(aemp
, sizeof(*aemp
));
77 aemp
->ev
.ae_name
= aemp
->ev_name
;
78 aemp
->ev
.ae_desc
= aemp
->ev_desc
;
/*
 * Release a cache entry previously obtained from audit_event_map_alloc().
 * free(NULL) is a no-op, so a NULL argument is harmless.
 */
static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}
90 * When reading into the cache fails, we need to flush the entire cache to
91 * prevent it from containing some but not all records.
96 struct audit_event_map
*aemp
;
98 /* XXX: Would assert 'mutex'. */
100 while ((aemp
= LIST_FIRST(&ev_cache
)) != NULL
) {
101 LIST_REMOVE(aemp
, ev_list
);
102 audit_event_map_free(aemp
);
107 load_event_table(void)
109 struct audit_event_map
*aemp
;
110 struct au_event_ent
*ep
;
113 * XXX: Would assert 'mutex'.
114 * Loading of the cache happens only once; dont check if cache is
117 LIST_INIT(&ev_cache
);
118 setauevent(); /* Rewind to beginning of entries. */
120 aemp
= audit_event_map_alloc();
125 ep
= getauevent_r(&aemp
->ev
);
127 LIST_INSERT_HEAD(&ev_cache
, aemp
, ev_list
);
129 audit_event_map_free(aemp
);
130 } while (ep
!= NULL
);
135 * Read the event with the matching event number from the cache.
137 static struct au_event_ent
*
138 read_from_cache(au_event_t event
)
140 struct audit_event_map
*elem
;
142 /* XXX: Would assert 'mutex'. */
144 LIST_FOREACH(elem
, &ev_cache
, ev_list
) {
145 if (elem
->ev
.ae_number
== event
)
153 * Check if the audit event is preselected against the preselection mask.
156 au_preselect(au_event_t event
, au_mask_t
*mask_p
, int sorf
, int flag
)
158 struct au_event_ent
*ev
;
159 au_class_t effmask
= 0;
165 pthread_mutex_lock(&mutex
);
168 if ( -1 == load_event_table()) {
169 pthread_mutex_unlock(&mutex
);
176 if (load_event_table() == -1) {
177 pthread_mutex_unlock(&mutex
);
180 ev
= read_from_cache(event
);
182 case AU_PRS_USECACHE
:
183 ev
= read_from_cache(event
);
189 pthread_mutex_unlock(&mutex
);
192 if (sorf
& AU_PRS_SUCCESS
)
193 effmask
|= (mask_p
->am_success
& ev
->ae_class
);
194 if (sorf
& AU_PRS_FAILURE
)
195 effmask
|= (mask_p
->am_failure
& ev
->ae_class
);
196 pthread_mutex_unlock(&mutex
);