Patrick Welche <prlw1@cam.ac.uk>
[netbsd-mini2440.git] / external / bsd / openldap / dist / servers / slapd / back-monitor / cache.c
blob03d1e0755b4b05e16a494052b1cc6b1a880c4be5
1 /* cache.c - routines to maintain an in-core cache of entries */
2 /* $OpenLDAP: pkg/ldap/servers/slapd/back-monitor/cache.c,v 1.27.2.5 2008/05/01 21:25:42 quanah Exp $ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2001-2008 The OpenLDAP Foundation.
 * Portions Copyright 2001-2003 Pierangelo Masarati.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
/* ACKNOWLEDGEMENTS:
 * This work was initially developed by Pierangelo Masarati for inclusion
 * in OpenLDAP Software.
 */
22 #include "portable.h"
24 #include <stdio.h>
25 #include "ac/string.h"
27 #include "slap.h"
29 #include "back-monitor.h"
32 * The cache maps DNs to Entries.
33 * Each entry, on turn, holds the list of its children in the e_private field.
34 * This is used by search operation to perform onelevel and subtree candidate
35 * selection.
37 typedef struct monitor_cache_t {
38 struct berval mc_ndn;
39 Entry *mc_e;
40 } monitor_cache_t;
43 * compares entries based on the dn
45 int
46 monitor_cache_cmp(
47 const void *c1,
48 const void *c2 )
50 monitor_cache_t *cc1 = ( monitor_cache_t * )c1;
51 monitor_cache_t *cc2 = ( monitor_cache_t * )c2;
54 * case sensitive, because the dn MUST be normalized
56 return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn );
60 * checks for duplicate entries
62 int
63 monitor_cache_dup(
64 void *c1,
65 void *c2 )
67 monitor_cache_t *cc1 = ( monitor_cache_t * )c1;
68 monitor_cache_t *cc2 = ( monitor_cache_t * )c2;
71 * case sensitive, because the dn MUST be normalized
73 return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn ) == 0 ? -1 : 0;
77 * adds an entry to the cache and inits the mutex
79 int
80 monitor_cache_add(
81 monitor_info_t *mi,
82 Entry *e )
84 monitor_cache_t *mc;
85 monitor_entry_t *mp;
86 int rc;
88 assert( mi != NULL );
89 assert( e != NULL );
91 mp = ( monitor_entry_t *)e->e_private;
93 mc = ( monitor_cache_t * )ch_malloc( sizeof( monitor_cache_t ) );
94 mc->mc_ndn = e->e_nname;
95 mc->mc_e = e;
96 ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
97 rc = avl_insert( &mi->mi_cache, ( caddr_t )mc,
98 monitor_cache_cmp, monitor_cache_dup );
99 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
101 return rc;
105 * locks the entry (no r/w)
108 monitor_cache_lock(
109 Entry *e )
111 monitor_entry_t *mp;
113 assert( e != NULL );
114 assert( e->e_private != NULL );
116 mp = ( monitor_entry_t * )e->e_private;
117 ldap_pvt_thread_mutex_lock( &mp->mp_mutex );
119 return( 0 );
123 * tries to lock the entry (no r/w)
126 monitor_cache_trylock(
127 Entry *e )
129 monitor_entry_t *mp;
131 assert( e != NULL );
132 assert( e->e_private != NULL );
134 mp = ( monitor_entry_t * )e->e_private;
135 return ldap_pvt_thread_mutex_trylock( &mp->mp_mutex );
139 * gets an entry from the cache based on the normalized dn
140 * with mutex locked
143 monitor_cache_get(
144 monitor_info_t *mi,
145 struct berval *ndn,
146 Entry **ep )
148 monitor_cache_t tmp_mc, *mc;
150 assert( mi != NULL );
151 assert( ndn != NULL );
152 assert( ep != NULL );
154 *ep = NULL;
156 tmp_mc.mc_ndn = *ndn;
157 retry:;
158 ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
159 mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
160 ( caddr_t )&tmp_mc, monitor_cache_cmp );
162 if ( mc != NULL ) {
163 /* entry is returned with mutex locked */
164 if ( monitor_cache_trylock( mc->mc_e ) ) {
165 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
166 ldap_pvt_thread_yield();
167 goto retry;
169 *ep = mc->mc_e;
172 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
174 return ( *ep == NULL ? -1 : 0 );
178 * gets an entry from the cache based on the normalized dn
179 * with mutex locked
182 monitor_cache_remove(
183 monitor_info_t *mi,
184 struct berval *ndn,
185 Entry **ep )
187 monitor_cache_t tmp_mc, *mc;
188 struct berval pndn;
190 assert( mi != NULL );
191 assert( ndn != NULL );
192 assert( ep != NULL );
194 *ep = NULL;
196 dnParent( ndn, &pndn );
198 retry:;
199 ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
201 tmp_mc.mc_ndn = *ndn;
202 mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
203 ( caddr_t )&tmp_mc, monitor_cache_cmp );
205 if ( mc != NULL ) {
206 monitor_cache_t *pmc;
208 if ( monitor_cache_trylock( mc->mc_e ) ) {
209 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
210 goto retry;
213 tmp_mc.mc_ndn = pndn;
214 pmc = ( monitor_cache_t * )avl_find( mi->mi_cache,
215 ( caddr_t )&tmp_mc, monitor_cache_cmp );
216 if ( pmc != NULL ) {
217 monitor_entry_t *mp = (monitor_entry_t *)mc->mc_e->e_private,
218 *pmp = (monitor_entry_t *)pmc->mc_e->e_private;
219 Entry **entryp;
221 if ( monitor_cache_trylock( pmc->mc_e ) ) {
222 monitor_cache_release( mi, mc->mc_e );
223 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
224 goto retry;
227 for ( entryp = &pmp->mp_children; *entryp != NULL; ) {
228 monitor_entry_t *next = (monitor_entry_t *)(*entryp)->e_private;
229 if ( next == mp ) {
230 *entryp = next->mp_next;
231 entryp = NULL;
232 break;
235 entryp = &next->mp_next;
238 if ( entryp != NULL ) {
239 Debug( LDAP_DEBUG_ANY,
240 "monitor_cache_remove(\"%s\"): "
241 "not in parent's list\n",
242 ndn->bv_val, 0, 0 );
245 /* either succeeded, and the entry is no longer
246 * in its parent's list, or failed, and the
247 * entry is neither mucked with nor returned */
248 monitor_cache_release( mi, pmc->mc_e );
250 if ( entryp == NULL ) {
251 monitor_cache_t *tmpmc;
253 tmp_mc.mc_ndn = *ndn;
254 tmpmc = avl_delete( &mi->mi_cache,
255 ( caddr_t )&tmp_mc, monitor_cache_cmp );
256 assert( tmpmc == mc );
258 *ep = mc->mc_e;
259 ch_free( mc );
260 mc = NULL;
262 /* NOTE: we destroy the mutex, but otherwise
263 * leave the private data around; specifically,
264 * callbacks need be freed by someone else */
266 ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
267 mp->mp_next = NULL;
268 mp->mp_children = NULL;
273 if ( mc ) {
274 monitor_cache_release( mi, mc->mc_e );
278 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
280 return ( *ep == NULL ? -1 : 0 );
284 * If the entry exists in cache, it is returned in locked status;
285 * otherwise, if the parent exists, if it may generate volatile
286 * descendants an attempt to generate the required entry is
287 * performed and, if successful, the entry is returned
290 monitor_cache_dn2entry(
291 Operation *op,
292 SlapReply *rs,
293 struct berval *ndn,
294 Entry **ep,
295 Entry **matched )
297 monitor_info_t *mi = (monitor_info_t *)op->o_bd->be_private;
298 int rc;
299 struct berval p_ndn = BER_BVNULL;
300 Entry *e_parent;
301 monitor_entry_t *mp;
303 assert( mi != NULL );
304 assert( ndn != NULL );
305 assert( ep != NULL );
306 assert( matched != NULL );
308 *matched = NULL;
310 if ( !dnIsSuffix( ndn, &op->o_bd->be_nsuffix[ 0 ] ) ) {
311 return( -1 );
314 rc = monitor_cache_get( mi, ndn, ep );
315 if ( !rc && *ep != NULL ) {
316 return( 0 );
319 /* try with parent/ancestors */
320 if ( BER_BVISNULL( ndn ) ) {
321 BER_BVSTR( &p_ndn, "" );
323 } else {
324 dnParent( ndn, &p_ndn );
327 rc = monitor_cache_dn2entry( op, rs, &p_ndn, &e_parent, matched );
328 if ( rc || e_parent == NULL ) {
329 return( -1 );
332 mp = ( monitor_entry_t * )e_parent->e_private;
333 rc = -1;
334 if ( mp->mp_flags & MONITOR_F_VOLATILE_CH ) {
335 /* parent entry generates volatile children */
336 rc = monitor_entry_create( op, rs, ndn, e_parent, ep );
339 if ( !rc ) {
340 monitor_cache_lock( *ep );
341 monitor_cache_release( mi, e_parent );
343 } else {
344 *matched = e_parent;
347 return( rc );
351 * releases the lock of the entry; if it is marked as volatile, it is
352 * destroyed.
355 monitor_cache_release(
356 monitor_info_t *mi,
357 Entry *e )
359 monitor_entry_t *mp;
361 assert( mi != NULL );
362 assert( e != NULL );
363 assert( e->e_private != NULL );
365 mp = ( monitor_entry_t * )e->e_private;
367 if ( mp->mp_flags & MONITOR_F_VOLATILE ) {
368 monitor_cache_t *mc, tmp_mc;
370 /* volatile entries do not return to cache */
371 ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
372 tmp_mc.mc_ndn = e->e_nname;
373 mc = avl_delete( &mi->mi_cache,
374 ( caddr_t )&tmp_mc, monitor_cache_cmp );
375 ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
376 if ( mc != NULL ) {
377 ch_free( mc );
380 ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );
381 ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
382 ch_free( mp );
383 e->e_private = NULL;
384 entry_free( e );
386 return( 0 );
389 ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );
391 return( 0 );
394 static void
395 monitor_entry_destroy( void *v_mc )
397 monitor_cache_t *mc = (monitor_cache_t *)v_mc;
399 if ( mc->mc_e != NULL ) {
400 monitor_entry_t *mp;
402 assert( mc->mc_e->e_private != NULL );
404 mp = ( monitor_entry_t * )mc->mc_e->e_private;
406 if ( mp->mp_cb ) {
407 monitor_callback_t *cb;
409 for ( cb = mp->mp_cb; cb != NULL; ) {
410 monitor_callback_t *next = cb->mc_next;
412 if ( cb->mc_free ) {
413 (void)cb->mc_free( mc->mc_e, &cb->mc_private );
415 ch_free( mp->mp_cb );
417 cb = next;
421 ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
423 ch_free( mp );
424 mc->mc_e->e_private = NULL;
425 entry_free( mc->mc_e );
428 ch_free( mc );
432 monitor_cache_destroy(
433 monitor_info_t *mi )
435 if ( mi->mi_cache ) {
436 avl_free( mi->mi_cache, monitor_entry_destroy );
439 return 0;