/* idl.c - ldap id list handling routines */
/* $OpenLDAP: pkg/ldap/servers/slapd/back-bdb/idl.c,v 1.124.2.7 2008/02/11 23:26:45 kurt Exp $ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2000-2008 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
#include "portable.h"

#include <stdio.h>
#include <ac/string.h>

#include "back-bdb.h"
#include "idl.h"

#define IDL_MAX(x,y)	( x > y ? x : y )
#define IDL_MIN(x,y)	( x < y ? x : y )

#define IDL_CMP(x,y)	( x < y ? -1 : ( x > y ? 1 : 0 ) )
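/* For orientation while reading the routines below: an in-memory IDL is an
 * array of IDs in which ids[0] holds the element count and ids[1..ids[0]]
 * hold the IDs in ascending order, unless ids[0] == NOID, in which case the
 * IDL is a "range" and ids[1]/ids[2] are its low/high bounds.  The disabled
 * sketch below is illustration only and assumes the BDB_IDL_* macros from
 * idl.h behave exactly as they are used in this file.
 */
#if 0
static void idl_layout_example( void )
{
	ID list[4];
	ID range[3];

	/* explicit list: three IDs, kept sorted */
	list[0] = 3;
	list[1] = 10;
	list[2] = 20;
	list[3] = 30;
	assert( !BDB_IDL_IS_RANGE( list ) );

	/* range form: stands for every ID from 10 through 30 */
	BDB_IDL_RANGE( range, 10, 30 );
	assert( BDB_IDL_IS_RANGE( range ) );
	assert( BDB_IDL_FIRST( range ) == 10 && BDB_IDL_LAST( range ) == 30 );
}
#endif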
#define IDL_LRU_DELETE( bdb, e ) do { \
	if ( (e) == (bdb)->bi_idl_lru_head ) { \
		if ( (e)->idl_lru_next == (bdb)->bi_idl_lru_head ) { \
			(bdb)->bi_idl_lru_head = NULL; \
		} else { \
			(bdb)->bi_idl_lru_head = (e)->idl_lru_next; \
		} \
	} \
	if ( (e) == (bdb)->bi_idl_lru_tail ) { \
		if ( (e)->idl_lru_prev == (bdb)->bi_idl_lru_tail ) { \
			assert( (bdb)->bi_idl_lru_head == NULL ); \
			(bdb)->bi_idl_lru_tail = NULL; \
		} else { \
			(bdb)->bi_idl_lru_tail = (e)->idl_lru_prev; \
		} \
	} \
	(e)->idl_lru_next->idl_lru_prev = (e)->idl_lru_prev; \
	(e)->idl_lru_prev->idl_lru_next = (e)->idl_lru_next; \
} while ( 0 )
static int
bdb_idl_entry_cmp( const void *v_idl1, const void *v_idl2 )
{
	const bdb_idl_cache_entry_t *idl1 = v_idl1, *idl2 = v_idl2;
	int rc;

	if ((rc = SLAP_PTRCMP( idl1->db, idl2->db ))) return rc;
	if ((rc = idl1->kstr.bv_len - idl2->kstr.bv_len )) return rc;
	return ( memcmp ( idl1->kstr.bv_val, idl2->kstr.bv_val, idl1->kstr.bv_len ) );
}
#if IDL_DEBUG > 0
static void idl_check( ID *ids )
{
	if( BDB_IDL_IS_RANGE( ids ) ) {
		assert( BDB_IDL_RANGE_FIRST(ids) <= BDB_IDL_RANGE_LAST(ids) );
	} else {
		ID i;
		for( i=1; i < ids[0]; i++ ) {
			assert( ids[i+1] > ids[i] );
		}
	}
}
#if IDL_DEBUG > 1
static void idl_dump( ID *ids )
{
	if( BDB_IDL_IS_RANGE( ids ) ) {
		Debug( LDAP_DEBUG_ANY,
			"IDL: range ( %ld - %ld )\n",
			(long) BDB_IDL_RANGE_FIRST( ids ),
			(long) BDB_IDL_RANGE_LAST( ids ) );

	} else {
		ID i;
		Debug( LDAP_DEBUG_ANY, "IDL: size %ld", (long) ids[0], 0, 0 );

		for( i=1; i<=ids[0]; i++ ) {
			if( i % 16 == 1 ) {
				Debug( LDAP_DEBUG_ANY, "\n", 0, 0, 0 );
			}
			Debug( LDAP_DEBUG_ANY, " %02lx", (long) ids[i], 0, 0 );
		}

		Debug( LDAP_DEBUG_ANY, "\n", 0, 0, 0 );
	}

	idl_check( ids );
}
#endif /* IDL_DEBUG > 1 */
#endif /* IDL_DEBUG > 0 */
unsigned bdb_idl_search( ID *ids, ID id )
{
#define IDL_BINARY_SEARCH 1
#ifdef IDL_BINARY_SEARCH
	/*
	 * binary search of id in ids
	 * if found, returns position of id
	 * if not found, returns first position greater than id
	 */
	unsigned base = 0;
	unsigned cursor = 0;
	int val = 0;
	unsigned n = ids[0];

	while( 0 < n ) {
		unsigned pivot = n >> 1;
		cursor = base + pivot;
		val = IDL_CMP( id, ids[cursor + 1] );

		if( val < 0 ) {
			n = pivot;

		} else if ( val > 0 ) {
			base = cursor + 1;
			n -= pivot + 1;

		} else {
			return cursor + 1;
		}
	}

	if( val > 0 ) {
		++cursor;
	}
	return cursor + 1;

#else
	/* (reverse) linear search */
	ID i;

	for( i=ids[0]; i; i-- ) {
		if( id > ids[i] ) {
			break;
		}
	}

	return i+1;
#endif
}
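/* Worked example of the contract above, for illustration: with the list IDL
 * {3; 5, 9, 17}, bdb_idl_search( ids, 9 ) yields 2 (the slot holding 9),
 * while bdb_idl_search( ids, 10 ) yields 3, the first slot whose ID exceeds
 * 10; bdb_idl_insert below relies on exactly this to find both duplicates
 * and insertion points.
 */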
int bdb_idl_insert( ID *ids, ID id )
{
	unsigned x;

#if IDL_DEBUG > 1
	Debug( LDAP_DEBUG_ANY, "insert: %04lx at %d\n", (long) id, x, 0 );
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif

	if (BDB_IDL_IS_RANGE( ids )) {
		/* if already in range, treat as a dup */
		if (id >= BDB_IDL_FIRST(ids) && id <= BDB_IDL_LAST(ids))
			return -1;
		if (id < BDB_IDL_FIRST(ids))
			ids[1] = id;
		else if (id > BDB_IDL_LAST(ids))
			ids[2] = id;
		return 0;
	}

	x = bdb_idl_search( ids, id );
	assert( x > 0 );

	if( x < 1 ) {
		/* internal error */
		return -2;
	}

	if ( x <= ids[0] && ids[x] == id ) {
		/* duplicate */
		return -1;
	}

	if ( ++ids[0] >= BDB_IDL_DB_MAX ) {
		if( id < ids[1] ) {
			ids[1] = id;
			ids[2] = ids[ids[0]-1];
		} else if ( ids[ids[0]-1] < id ) {
			ids[2] = id;
		} else {
			ids[2] = ids[ids[0]-1];
		}
		ids[0] = NOID;

	} else {
		/* insert id */
		AC_MEMCPY( &ids[x+1], &ids[x], (ids[0]-x) * sizeof(ID) );
		ids[x] = id;
	}

	return 0;
}
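/* Note on the overflow path above: once a list reaches BDB_IDL_DB_MAX
 * entries, bdb_idl_insert collapses it into the range { NOID, lo, hi }
 * covering everything it held plus the new id.  The range is a superset
 * approximation - it can no longer say which IDs in [lo,hi] are really
 * present - which is the trade-off that keeps stored IDLs bounded in size.
 */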
static int bdb_idl_delete( ID *ids, ID id )
{
	unsigned x;

#if IDL_DEBUG > 1
	Debug( LDAP_DEBUG_ANY, "delete: %04lx at %d\n", (long) id, x, 0 );
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif

	if (BDB_IDL_IS_RANGE( ids )) {
		/* If deleting a range boundary, adjust */
		if ( ids[1] == id )
			ids[1]++;
		else if ( ids[2] == id )
			ids[2]--;
		/* deleting from inside a range is a no-op */

		/* If the range has collapsed, re-adjust */
		if ( ids[1] > ids[2] )
			ids[0] = 0;
		/* A range of a single item can just be a normal list */
		else if ( ids[1] == ids[2] )
			ids[0] = 1;
		return 0;
	}

	x = bdb_idl_search( ids, id );
	assert( x > 0 );

	if( x <= 0 ) {
		/* internal error */
		return -2;
	}

	if( x > ids[0] || ids[x] != id ) {
		/* not found */
		return -1;

	} else if ( --ids[0] == 0 ) {
		if( x != 1 ) {
			return -3;
		}

	} else {
		AC_MEMCPY( &ids[x], &ids[x+1], (1+ids[0]-x) * sizeof(ID) );
	}

	return 0;
}
static char *
bdb_show_key(
	DBT		*key,
	char		*buf )
{
	if ( key->size == 4 /* LUTIL_HASH_BYTES */ ) {
		unsigned char *c = key->data;
		sprintf( buf, "[%02x%02x%02x%02x]", c[0], c[1], c[2], c[3] );
		return buf;
	} else {
		return key->data;
	}
}
/* Find a db/key pair in the IDL cache. If ids is non-NULL,
 * copy the cached IDL into it, otherwise just return the status.
 */
int
bdb_idl_cache_get(
	struct bdb_info	*bdb,
	DB		*db,
	DBT		*key,
	ID		*ids )
{
	bdb_idl_cache_entry_t idl_tmp;
	bdb_idl_cache_entry_t *matched_idl_entry;
	int rc = LDAP_NO_SUCH_OBJECT;

	DBT2bv( key, &idl_tmp.kstr );
	idl_tmp.db = db;
	ldap_pvt_thread_rdwr_rlock( &bdb->bi_idl_tree_rwlock );
	matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
		bdb_idl_entry_cmp );
	if ( matched_idl_entry != NULL ) {
		if ( matched_idl_entry->idl && ids )
			BDB_IDL_CPY( ids, matched_idl_entry->idl );
		matched_idl_entry->idl_flags |= CACHE_ENTRY_REFERENCED;
		if ( matched_idl_entry->idl )
			rc = LDAP_SUCCESS;
		else
			rc = DB_NOTFOUND;
	}
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_idl_tree_rwlock );

	return rc;
}
void
bdb_idl_cache_put(
	struct bdb_info	*bdb,
	DB		*db,
	DBT		*key,
	ID		*ids,
	int		rc )
{
	bdb_idl_cache_entry_t idl_tmp;
	bdb_idl_cache_entry_t *ee, *eprev;

	if ( rc == DB_NOTFOUND || BDB_IDL_IS_ZERO( ids ))
		return;

	DBT2bv( key, &idl_tmp.kstr );

	ee = (bdb_idl_cache_entry_t *) ch_malloc(
		sizeof( bdb_idl_cache_entry_t ) );
	ee->db = db;
	ee->idl = (ID *) ch_malloc( BDB_IDL_SIZEOF ( ids ) );
	BDB_IDL_CPY( ee->idl, ids );

	ee->idl_lru_prev = NULL;
	ee->idl_lru_next = NULL;
	ee->idl_flags = 0;
	ber_dupbv( &ee->kstr, &idl_tmp.kstr );
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
	if ( avl_insert( &bdb->bi_idl_tree, (caddr_t) ee,
		bdb_idl_entry_cmp, avl_dup_error ))
	{
		ch_free( ee->kstr.bv_val );
		ch_free( ee->idl );
		ch_free( ee );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
		return;
	}
	ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
	/* Insert the new entry at the head of the circular LRU list */
	if ( bdb->bi_idl_lru_head ) {
		assert( bdb->bi_idl_lru_tail != NULL );
		assert( bdb->bi_idl_lru_head->idl_lru_prev != NULL );
		assert( bdb->bi_idl_lru_head->idl_lru_next != NULL );

		ee->idl_lru_next = bdb->bi_idl_lru_head;
		ee->idl_lru_prev = bdb->bi_idl_lru_head->idl_lru_prev;
		bdb->bi_idl_lru_head->idl_lru_prev->idl_lru_next = ee;
		bdb->bi_idl_lru_head->idl_lru_prev = ee;
	} else {
		ee->idl_lru_next = ee->idl_lru_prev = ee;
		bdb->bi_idl_lru_tail = ee;
	}
	bdb->bi_idl_lru_head = ee;

	if ( ++bdb->bi_idl_cache_size > bdb->bi_idl_cache_max_size ) {
		int i;

		ee = bdb->bi_idl_lru_tail;
		for ( i = 0; ee != NULL && i < 10; i++, ee = eprev ) {
			eprev = ee->idl_lru_prev;
			if ( eprev == ee ) {
				eprev = NULL;
			}
			/* Leave referenced entries alone */
			if ( ee->idl_flags & CACHE_ENTRY_REFERENCED ) {
				ee->idl_flags ^= CACHE_ENTRY_REFERENCED;
				continue;
			}
			if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) ee,
				bdb_idl_entry_cmp ) == NULL ) {
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_cache_put: "
					"AVL delete failed\n",
					0, 0, 0 );
			}
			IDL_LRU_DELETE( bdb, ee );
			--bdb->bi_idl_cache_size;
			ch_free( ee->kstr.bv_val );
			ch_free( ee->idl );
			ch_free( ee );
		}
		bdb->bi_idl_lru_tail = eprev;
		assert( bdb->bi_idl_lru_tail != NULL
			|| bdb->bi_idl_lru_head == NULL );
	}
	ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
}
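/* The eviction pass above amounts to a second-chance policy on the circular
 * LRU ring: an entry marked CACHE_ENTRY_REFERENCED since its last visit only
 * has the flag cleared and is skipped, and at most a small batch of
 * unreferenced entries is freed per overflow, so recently used IDLs tend to
 * stay cached.
 */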
void
bdb_idl_cache_del(
	struct bdb_info	*bdb,
	DB		*db,
	DBT		*key )
{
	bdb_idl_cache_entry_t *matched_idl_entry, idl_tmp;
	DBT2bv( key, &idl_tmp.kstr );
	idl_tmp.db = db;
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
	matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
		bdb_idl_entry_cmp );
	if ( matched_idl_entry != NULL ) {
		if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) matched_idl_entry,
			bdb_idl_entry_cmp ) == NULL ) {
			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_cache_del: "
				"AVL delete failed\n",
				0, 0, 0 );
		}
		--bdb->bi_idl_cache_size;
		ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
		IDL_LRU_DELETE( bdb, matched_idl_entry );
		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
		free( matched_idl_entry->kstr.bv_val );
		if ( matched_idl_entry->idl )
			free( matched_idl_entry->idl );
		free( matched_idl_entry );
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
}
void
bdb_idl_cache_add_id(
	struct bdb_info	*bdb,
	DB		*db,
	DBT		*key,
	ID		id )
{
	bdb_idl_cache_entry_t *cache_entry, idl_tmp;
	DBT2bv( key, &idl_tmp.kstr );
	idl_tmp.db = db;
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
	cache_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
		bdb_idl_entry_cmp );
	if ( cache_entry != NULL ) {
		if ( !BDB_IDL_IS_RANGE( cache_entry->idl ) &&
			cache_entry->idl[0] < BDB_IDL_DB_MAX ) {
			size_t s = BDB_IDL_SIZEOF( cache_entry->idl ) + sizeof(ID);
			cache_entry->idl = ch_realloc( cache_entry->idl, s );
		}
		bdb_idl_insert( cache_entry->idl, id );
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
}
void
bdb_idl_cache_del_id(
	struct bdb_info	*bdb,
	DB		*db,
	DBT		*key,
	ID		id )
{
	bdb_idl_cache_entry_t *cache_entry, idl_tmp;
	DBT2bv( key, &idl_tmp.kstr );
	idl_tmp.db = db;
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
	cache_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
		bdb_idl_entry_cmp );
	if ( cache_entry != NULL ) {
		bdb_idl_delete( cache_entry->idl, id );
		if ( cache_entry->idl[0] == 0 ) {
			if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) cache_entry,
				bdb_idl_entry_cmp ) == NULL ) {
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_cache_del: "
					"AVL delete failed\n",
					0, 0, 0 );
			}
			--bdb->bi_idl_cache_size;
			ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
			IDL_LRU_DELETE( bdb, cache_entry );
			ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
			free( cache_entry->kstr.bv_val );
			free( cache_entry->idl );
			free( cache_entry );
		}
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
}
int
bdb_idl_fetch_key(
	BackendDB	*be,
	DB		*db,
	BDB_LOCKER	locker,
	DBT		*key,
	ID		*ids,
	DBC		**saved_cursor,
	int		get_flag )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int rc;
	DBT data, key2, *kptr;
	DBC *cursor;
	ID *i;
	void *ptr;
	size_t len;
	int rc2;
	int flags = bdb->bi_db_opflags | DB_MULTIPLE;
	int opflag;

	/* If using BerkeleyDB 4.0, the buf must be large enough to
	 * grab the entire IDL in one get(), otherwise BDB will leak
	 * resources on subsequent get's. We can safely call get()
	 * twice - once for the data, and once to get the DB_NOTFOUND
	 * result meaning there's no more data. See ITS#2040 for details.
	 * This bug is fixed in BDB 4.1 so a smaller buffer will work if
	 * stack space is too limited.
	 *
	 * configure now requires Berkeley DB 4.1.
	 */
#if DB_VERSION_FULL < 0x04010000
#	define BDB_ENOUGH 5
#else
	/* We sometimes test with tiny IDLs, and BDB always wants buffers
	 * that are at least one page in size.
	 */
# if BDB_IDL_DB_SIZE < 4096
#	define BDB_ENOUGH 2048
# else
#	define BDB_ENOUGH 1
# endif
#endif
	ID buf[BDB_IDL_DB_SIZE*BDB_ENOUGH];

	char keybuf[16];

	Debug( LDAP_DEBUG_ARGS,
		"bdb_idl_fetch_key: %s\n",
		bdb_show_key( key, keybuf ), 0, 0 );

	assert( ids != NULL );

	if ( saved_cursor && *saved_cursor ) {
		opflag = DB_NEXT;
	} else if ( get_flag == LDAP_FILTER_GE ) {
		opflag = DB_SET_RANGE;
	} else if ( get_flag == LDAP_FILTER_LE ) {
		opflag = DB_FIRST;
	} else {
		opflag = DB_SET;
	}

	/* only non-range lookups can use the IDL cache */
	if ( bdb->bi_idl_cache_size && opflag == DB_SET ) {
		rc = bdb_idl_cache_get( bdb, db, key, ids );
		if ( rc != LDAP_NO_SUCH_OBJECT ) return rc;
	}

	DBTzero( &data );

	data.data = buf;
	data.ulen = sizeof(buf);
	data.flags = DB_DBT_USERMEM;

	/* If we're not reusing an existing cursor, get a new one */
	if( opflag != DB_NEXT ) {
		rc = db->cursor( db, NULL, &cursor, bdb->bi_db_opflags );
		if( rc != 0 ) {
			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
				"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
			return rc;
		}
		CURSOR_SETLOCKER( cursor, locker );
	} else {
		cursor = *saved_cursor;
	}

	/* If this is a LE lookup, save original key so we can determine
	 * when to stop. If this is a GE lookup, save the key since it
	 * will be overwritten.
	 */
	if ( get_flag == LDAP_FILTER_LE || get_flag == LDAP_FILTER_GE ) {
		DBTzero( &key2 );
		key2.flags = DB_DBT_USERMEM;
		key2.ulen = sizeof(keybuf);
		key2.data = keybuf;
		key2.size = key->size;
		AC_MEMCPY( keybuf, key->data, key->size );
		kptr = &key2;
	} else {
		kptr = key;
	}
	len = key->size;
	rc = cursor->c_get( cursor, kptr, &data, flags | opflag );

	/* skip presence key on range inequality lookups */
	while (rc == 0 && kptr->size != len) {
		rc = cursor->c_get( cursor, kptr, &data, flags | DB_NEXT_NODUP );
	}
	/* If we're doing a LE compare and the new key is greater than
	 * our search key, we're done
	 */
	if (rc == 0 && get_flag == LDAP_FILTER_LE && memcmp( kptr->data,
		key->data, key->size ) > 0 ) {
		rc = DB_NOTFOUND;
	}
	if (rc == 0) {
		i = ids;
		while (rc == 0) {
			u_int8_t *j;

			DB_MULTIPLE_INIT( ptr, &data );
			while (ptr) {
				DB_MULTIPLE_NEXT( ptr, &data, j, len );
				if (j) {
					++i;
					BDB_DISK2ID( j, i );
				}
			}
			rc = cursor->c_get( cursor, key, &data, flags | DB_NEXT_DUP );
		}
		if ( rc == DB_NOTFOUND ) rc = 0;
		ids[0] = i - ids;
		/* On disk, a range is denoted by 0 in the first element */
		if ( ids[1] == 0 ) {
			if (ids[0] != BDB_IDL_RANGE_SIZE) {
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
					"range size mismatch: expected %d, got %ld\n",
					BDB_IDL_RANGE_SIZE, ids[0], 0 );
				cursor->c_close( cursor );
				return -1;
			}
			BDB_IDL_RANGE( ids, ids[2], ids[3] );
		}
		data.size = BDB_IDL_SIZEOF(ids);
	}

	rc2 = 0;
	if ( saved_cursor && rc == 0 ) {
		if ( !*saved_cursor )
			*saved_cursor = cursor;
	} else {
		rc2 = cursor->c_close( cursor );
	}
	if ( rc2 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"close failed: %s (%d)\n", db_strerror(rc2), rc2, 0 );
		return rc2;
	}

	if( rc == DB_NOTFOUND ) {
		return rc;

	} else if( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
		return rc;

	} else if ( data.size == 0 || data.size % sizeof( ID ) ) {
		/* size not multiple of ID size */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"odd size: expected %ld multiple, got %ld\n",
			(long) sizeof( ID ), (long) data.size, 0 );
		return -1;

	} else if ( data.size != BDB_IDL_SIZEOF(ids) ) {
		/* size mismatch */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get size mismatch: expected %ld, got %ld\n",
			(long) ((1 + ids[0]) * sizeof( ID )), (long) data.size, 0 );
		return -1;
	}

	if ( bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_put( bdb, db, key, ids, rc );
	}

	return rc;
}
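/* On-disk format handled above, as implied by the checks in this function:
 * a normal key holds up to BDB_IDL_DB_MAX duplicate data items, one ID each,
 * while a range key holds exactly three duplicates - 0, lo, hi - so after
 * the multi-get the code sees ids[1] == 0 and rewrites the result into the
 * in-memory form { NOID, lo, hi } via BDB_IDL_RANGE( ids, ids[2], ids[3] ).
 */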
int
bdb_idl_insert_key(
	BackendDB	*be,
	DB		*db,
	DB_TXN		*tid,
	DBT		*key,
	ID		id )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int	rc;
	DBT data;
	DBC *cursor;
	ID lo, hi, nlo, nhi, nid;
	char *err;

	{
		char buf[16];
		Debug( LDAP_DEBUG_ARGS,
			"bdb_idl_insert_key: %lx %s\n",
			(long) id, bdb_show_key( key, buf ), 0 );
	}

	assert( id != NOID );

	DBTzero( &data );
	data.size = sizeof( ID );
	data.ulen = data.size;
	data.flags = DB_DBT_USERMEM;

	BDB_ID2DISK( id, &nid );

	rc = db->cursor( db, tid, &cursor, bdb->bi_db_opflags );
	if ( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_insert_key: "
			"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
		return rc;
	}
	data.data = &nlo;
	/* Fetch the first data item for this key, to see if it
	 * exists and if it's a range.
	 */
	rc = cursor->c_get( cursor, key, &data, DB_SET );
	err = "c_get";
	if ( rc == 0 ) {
		if ( nlo != 0 ) {
			/* not a range, count the number of items */
			db_recno_t count;
			rc = cursor->c_count( cursor, &count, 0 );
			if ( rc != 0 ) {
				err = "c_count";
				goto fail;
			}
			if ( count >= BDB_IDL_DB_MAX ) {
			/* No room, convert to a range */
				DBT key2 = *key;
				db_recno_t i;

				key2.dlen = key2.ulen;
				key2.flags |= DB_DBT_PARTIAL;

				BDB_DISK2ID( &nlo, &lo );
				data.data = &nhi;

				rc = cursor->c_get( cursor, &key2, &data, DB_NEXT_NODUP );
				if ( rc != 0 && rc != DB_NOTFOUND ) {
					err = "c_get next_nodup";
					goto fail;
				}
				if ( rc == DB_NOTFOUND ) {
					rc = cursor->c_get( cursor, key, &data, DB_LAST );
					if ( rc != 0 ) {
						err = "c_get last";
						goto fail;
					}
				} else {
					rc = cursor->c_get( cursor, key, &data, DB_PREV );
					if ( rc != 0 ) {
						err = "c_get prev";
						goto fail;
					}
				}
				BDB_DISK2ID( &nhi, &hi );
				/* Update hi/lo if needed, then delete all the items
				 * between lo and hi
				 */
				if ( id < lo ) {
					lo = id;
					nlo = nid;
				} else if ( id > hi ) {
					hi = id;
					nhi = nid;
				}
				data.data = &nid;
				/* Don't fetch anything, just position cursor */
				data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;
				data.dlen = data.ulen = 0;
				rc = cursor->c_get( cursor, key, &data, DB_SET );
				if ( rc != 0 ) {
					err = "c_get 2";
					goto fail;
				}
				rc = cursor->c_del( cursor, 0 );
				if ( rc != 0 ) {
					err = "c_del range1";
					goto fail;
				}
				/* Delete all the records */
				for ( i=1; i<count; i++ ) {
					rc = cursor->c_get( cursor, &key2, &data, DB_NEXT_DUP );
					if ( rc != 0 ) {
						err = "c_get next_dup";
						goto fail;
					}
					rc = cursor->c_del( cursor, 0 );
					if ( rc != 0 ) {
						err = "c_del range";
						goto fail;
					}
				}
				/* Store the range marker */
				data.size = data.ulen = sizeof(ID);
				data.flags = DB_DBT_USERMEM;
				nid = 0;
				rc = cursor->c_put( cursor, key, &data, DB_KEYFIRST );
				if ( rc != 0 ) {
					err = "c_put range";
					goto fail;
				}
				nid = nlo;
				rc = cursor->c_put( cursor, key, &data, DB_KEYLAST );
				if ( rc != 0 ) {
					err = "c_put lo";
					goto fail;
				}
				nid = nhi;
				rc = cursor->c_put( cursor, key, &data, DB_KEYLAST );
				if ( rc != 0 ) {
					err = "c_put hi";
					goto fail;
				}
			} else {
			/* There's room, just store it */
				goto put1;
			}
		} else {
			/* It's a range, see if we need to rewrite
			 * the boundaries
			 */
			hi = lo = 0;
			data.data = &nlo;
			rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
			if ( rc != 0 ) {
				err = "c_get lo";
				goto fail;
			}
			BDB_DISK2ID( &nlo, &lo );
			data.data = &nhi;
			rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
			if ( rc != 0 ) {
				err = "c_get hi";
				goto fail;
			}
			BDB_DISK2ID( &nhi, &hi );
			if ( id < lo || id > hi ) {
				/* Delete the current lo/hi */
				rc = cursor->c_del( cursor, 0 );
				if ( rc != 0 ) {
					err = "c_del";
					goto fail;
				}
				data.data = &nid;
				rc = cursor->c_put( cursor, key, &data, DB_KEYFIRST );
				if ( rc != 0 ) {
					err = "c_put lo/hi";
					goto fail;
				}
			}
		}
	} else if ( rc == DB_NOTFOUND ) {
put1:	data.data = &nid;
		rc = cursor->c_put( cursor, key, &data, DB_NODUPDATA );
		/* Don't worry if it's already there */
		if ( rc != 0 && rc != DB_KEYEXIST ) {
			err = "c_put id";
			goto fail;
		}
	} else {
		/* initial c_get failed, nothing was done */
fail:
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_insert_key: "
			"%s failed: %s (%d)\n", err, db_strerror(rc), rc );
		cursor->c_close( cursor );
		return rc;
	}
	/* If key was added (didn't already exist) and using IDL cache,
	 * update key in IDL cache.
	 */
	if ( !rc && bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_add_id( bdb, db, key, id );
	}
	rc = cursor->c_close( cursor );
	if( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_insert_key: "
			"c_close failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
	}
	return rc;
}
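/* bdb_idl_insert_key above therefore moves a key between three stored
 * states: absent (first id stored via the put1 path), a plain list of
 * duplicate IDs while the count stays below BDB_IDL_DB_MAX, and the 0/lo/hi
 * range marker once it would overflow, after which only the boundaries are
 * ever rewritten.
 */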
int
bdb_idl_delete_key(
	BackendDB	*be,
	DB		*db,
	DB_TXN		*tid,
	DBT		*key,
	ID		id )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int	rc;
	DBT data;
	DBC *cursor;
	ID lo, hi, tmp, nid, nlo, nhi;
	char *err;

	{
		char buf[16];
		Debug( LDAP_DEBUG_ARGS,
			"bdb_idl_delete_key: %lx %s\n",
			(long) id, bdb_show_key( key, buf ), 0 );
	}
	assert( id != NOID );

	if ( bdb->bi_idl_cache_size ) {
		bdb_idl_cache_del( bdb, db, key );
	}

	BDB_ID2DISK( id, &nid );

	DBTzero( &data );
	data.data = &tmp;
	data.size = sizeof( id );
	data.ulen = data.size;
	data.flags = DB_DBT_USERMEM;

	rc = db->cursor( db, tid, &cursor, bdb->bi_db_opflags );
	if ( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_delete_key: "
			"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
		return rc;
	}
	/* Fetch the first data item for this key, to see if it
	 * exists and if it's a range.
	 */
	rc = cursor->c_get( cursor, key, &data, DB_SET );
	err = "c_get";
	if ( rc == 0 ) {
		if ( tmp != 0 ) {
			/* Not a range, just delete it */
			if ( tmp != nid ) {
				/* position to correct item */
				data.data = &nid;
				rc = cursor->c_get( cursor, key, &data, DB_GET_BOTH );
				if ( rc != 0 ) {
					err = "c_get id";
					goto fail;
				}
			}
			rc = cursor->c_del( cursor, 0 );
			if ( rc != 0 ) {
				err = "c_del id";
				goto fail;
			}
		} else {
			/* It's a range, see if we need to rewrite
			 * the boundaries
			 */
			data.data = &nlo;
			rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
			if ( rc != 0 ) {
				err = "c_get lo";
				goto fail;
			}
			BDB_DISK2ID( &nlo, &lo );
			data.data = &nhi;
			rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
			if ( rc != 0 ) {
				err = "c_get hi";
				goto fail;
			}
			BDB_DISK2ID( &nhi, &hi );
			if ( id == lo || id == hi ) {
				if ( id == lo ) {
					id++;
					lo = id;
				} else if ( id == hi ) {
					id--;
					hi = id;
				}
				if ( lo >= hi ) {
					/* The range has collapsed... */
					rc = db->del( db, tid, key, 0 );
					if ( rc != 0 ) {
						err = "del";
						goto fail;
					}
				} else {
					if ( id == lo ) {
						/* reposition on lo slot */
						data.data = &nlo;
						cursor->c_get( cursor, key, &data, DB_PREV );
					}
					rc = cursor->c_del( cursor, 0 );
					if ( rc != 0 ) {
						err = "c_del";
						goto fail;
					}
				}
				if ( lo <= hi ) {
					BDB_ID2DISK( id, &nid );
					data.data = &nid;
					rc = cursor->c_put( cursor, key, &data, DB_KEYFIRST );
					if ( rc != 0 ) {
						err = "c_put lo/hi";
						goto fail;
					}
				}
			}
		}
	} else {
		/* initial c_get failed, nothing was done */
fail:
		if ( rc != DB_NOTFOUND ) {
			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_delete_key: "
				"%s failed: %s (%d)\n", err, db_strerror(rc), rc );
		}
		cursor->c_close( cursor );
		return rc;
	}
	rc = cursor->c_close( cursor );
	if( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY,
			"=> bdb_idl_delete_key: c_close failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
	}

	return rc;
}
/*
 * idl_intersection - return a = a intersection b
 */
int
bdb_idl_intersection(
	ID *a,
	ID *b )
{
	ID ida, idb;
	ID idmax, idmin;
	ID cursora = 0, cursorb = 0, cursorc;
	int swap = 0;

	if ( BDB_IDL_IS_ZERO( a ) || BDB_IDL_IS_ZERO( b ) ) {
		a[0] = 0;
		return 0;
	}

	idmin = IDL_MAX( BDB_IDL_FIRST(a), BDB_IDL_FIRST(b) );
	idmax = IDL_MIN( BDB_IDL_LAST(a), BDB_IDL_LAST(b) );
	if ( idmin > idmax ) {
		a[0] = 0;
		return 0;
	} else if ( idmin == idmax ) {
		a[0] = 1;
		a[1] = idmin;
		return 0;
	}

	if ( BDB_IDL_IS_RANGE( a ) ) {
		if ( BDB_IDL_IS_RANGE(b) ) {
		/* If both are ranges, just shrink the boundaries */
			a[1] = idmin;
			a[2] = idmax;
			return 0;
		} else {
		/* Else swap so that b is the range, a is a list */
			ID *tmp = a;
			a = b;
			b = tmp;
			swap = 1;
		}
	}

	/* If a range completely covers the list, the result is
	 * just the list. If idmin to idmax is contiguous, just
	 * turn it into a range.
	 */
	if ( BDB_IDL_IS_RANGE( b )
		&& BDB_IDL_FIRST( b ) <= BDB_IDL_FIRST( a )
		&& BDB_IDL_LAST( b ) >= BDB_IDL_LAST( a ) ) {
		if (idmax - idmin + 1 == a[0])
		{
			a[0] = NOID;
			a[1] = idmin;
			a[2] = idmax;
		}
		goto done;
	}

	/* Fine, do the intersection one element at a time.
	 * First advance to idmin in both IDLs.
	 */
	cursora = cursorb = idmin;
	ida = bdb_idl_first( a, &cursora );
	idb = bdb_idl_first( b, &cursorb );
	cursorc = 0;

	while( ida <= idmax || idb <= idmax ) {
		if( ida == idb ) {
			a[++cursorc] = ida;
			ida = bdb_idl_next( a, &cursora );
			idb = bdb_idl_next( b, &cursorb );
		} else if ( ida < idb ) {
			ida = bdb_idl_next( a, &cursora );
		} else {
			idb = bdb_idl_next( b, &cursorb );
		}
	}
	a[0] = cursorc;

done:
	if ( swap )
		BDB_IDL_CPY( b, a );

	return 0;
}
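/* Example of the element-wise walk above, for illustration: a = {4; 1,3,5,9}
 * and b = {3; 3,4,5} give idmin = 3, idmax = 5, and the merge keeps only the
 * common IDs, leaving a = {2; 3,5}.  Had b been the range [1,10], the
 * covering-range shortcut would have returned a unchanged.
 */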
/*
 * idl_union - return a = a union b
 */
int
bdb_idl_union(
	ID *a,
	ID *b )
{
	ID ida, idb;
	ID cursora = 0, cursorb = 0, cursorc;

	if ( BDB_IDL_IS_ZERO( b ) ) {
		return 0;
	}

	if ( BDB_IDL_IS_ZERO( a ) ) {
		BDB_IDL_CPY( a, b );
		return 0;
	}

	if ( BDB_IDL_IS_RANGE( a ) || BDB_IDL_IS_RANGE(b) ) {
over:		ida = IDL_MIN( BDB_IDL_FIRST(a), BDB_IDL_FIRST(b) );
		idb = IDL_MAX( BDB_IDL_LAST(a), BDB_IDL_LAST(b) );
		a[0] = NOID;
		a[1] = ida;
		a[2] = idb;
		return 0;
	}

	ida = bdb_idl_first( a, &cursora );
	idb = bdb_idl_first( b, &cursorb );

	cursorc = b[0];

	/* The distinct elements of a are cat'd to b */
	while( ida != NOID || idb != NOID ) {
		if ( ida < idb ) {
			if( ++cursorc > BDB_IDL_UM_MAX ) {
				goto over;
			}
			b[cursorc] = ida;
			ida = bdb_idl_next( a, &cursora );

		} else {
			if ( ida == idb )
				ida = bdb_idl_next( a, &cursora );
			idb = bdb_idl_next( b, &cursorb );
		}
	}

	/* b is copied back to a in sorted order */
	a[0] = cursorc;
	cursora = 1;
	cursorb = 1;
	cursorc = b[0]+1;
	while (cursorb <= b[0] || cursorc <= a[0]) {
		if (cursorc > a[0])
			idb = NOID;
		else
			idb = b[cursorc];
		if (cursorb <= b[0] && b[cursorb] < idb)
			a[cursora++] = b[cursorb++];
		else {
			a[cursora++] = idb;
			cursorc++;
		}
	}

	return 0;
}
/*
 * bdb_idl_notin - return a intersection ~b (or a minus b)
 */
int
bdb_idl_notin(
	ID	*a,
	ID	*b,
	ID	*ids )
{
	ID ida, idb;
	ID cursora = 0, cursorb = 0;

	if( BDB_IDL_IS_ZERO( a ) ||
		BDB_IDL_IS_ZERO( b ) ||
		BDB_IDL_IS_RANGE( b ) )
	{
		BDB_IDL_CPY( ids, a );
		return 0;
	}

	if( BDB_IDL_IS_RANGE( a ) ) {
		BDB_IDL_CPY( ids, a );
		return 0;
	}

	ida = bdb_idl_first( a, &cursora ),
	idb = bdb_idl_first( b, &cursorb );

	ids[0] = 0;

	while( ida != NOID ) {
		if ( idb == NOID ) {
			/* we could shortcut this */
			ids[++ids[0]] = ida;
			ida = bdb_idl_next( a, &cursora );

		} else if ( ida < idb ) {
			ids[++ids[0]] = ida;
			ida = bdb_idl_next( a, &cursora );

		} else if ( ida > idb ) {
			idb = bdb_idl_next( b, &cursorb );

		} else {
			ida = bdb_idl_next( a, &cursora );
			idb = bdb_idl_next( b, &cursorb );
		}
	}

	return 0;
}
ID bdb_idl_first( ID *ids, ID *cursor )
{
	ID pos;

	if ( ids[0] == 0 ) {
		*cursor = NOID;
		return NOID;
	}

	if ( BDB_IDL_IS_RANGE( ids ) ) {
		if( *cursor < ids[1] ) {
			*cursor = ids[1];
		}
		return *cursor;
	}

	pos = bdb_idl_search( ids, *cursor );

	if( pos > ids[0] ) {
		return NOID;
	}

	*cursor = pos;
	return ids[pos];
}

ID bdb_idl_next( ID *ids, ID *cursor )
{
	if ( BDB_IDL_IS_RANGE( ids ) ) {
		if( ids[2] < ++(*cursor) ) {
			return NOID;
		}
		return *cursor;
	}

	if ( ++(*cursor) <= ids[0] ) {
		return ids[*cursor];
	}

	return NOID;
}
/* Add one ID to an unsorted list. We ensure that the first element is the
 * minimum and the last element is the maximum, for fast range compaction.
 * This means IDLs up to length 3 are always sorted...
 */
int bdb_idl_append_one( ID *ids, ID id )
{
	if (BDB_IDL_IS_RANGE( ids )) {
		/* if already in range, treat as a dup */
		if (id >= BDB_IDL_FIRST(ids) && id <= BDB_IDL_LAST(ids))
			return -1;
		if (id < BDB_IDL_FIRST(ids))
			ids[1] = id;
		else if (id > BDB_IDL_LAST(ids))
			ids[2] = id;
		return 0;
	}
	if ( ids[0] ) {
		ID tmp;

		if ( id < ids[1] ) {
			tmp = ids[1];
			ids[1] = id;
			id = tmp;
		}
		if ( ids[0] > 1 && id < ids[ids[0]] ) {
			tmp = ids[ids[0]];
			ids[ids[0]] = id;
			id = tmp;
		}
	}
	ids[0]++;
	if ( ids[0] >= BDB_IDL_UM_MAX ) {
		ids[0] = NOID;
		ids[2] = id;
	} else {
		ids[ids[0]] = id;
	}
	return 0;
}
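/* Example of the invariant kept above, for illustration: appending 7, 3, 9,
 * then 5 to an empty IDL yields {4; 3, 7, 5, 9} - the middle is unsorted,
 * but ids[1] is always the minimum and ids[ids[0]] the maximum, which is
 * what makes the range compaction here and in bdb_idl_append cheap.
 */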
/* Append sorted list b to sorted list a. The result is unsorted but
 * a[1] is the min of the result and a[a[0]] is the max.
 */
int bdb_idl_append( ID *a, ID *b )
{
	ID ida, idb, tmp, swap = 0;

	if ( BDB_IDL_IS_ZERO( b ) ) {
		return 0;
	}

	if ( BDB_IDL_IS_ZERO( a ) ) {
		BDB_IDL_CPY( a, b );
		return 0;
	}

	ida = BDB_IDL_LAST( a );
	idb = BDB_IDL_LAST( b );
	if ( BDB_IDL_IS_RANGE( a ) || BDB_IDL_IS_RANGE(b) ||
		a[0] + b[0] >= BDB_IDL_UM_MAX ) {
		a[2] = IDL_MAX( ida, idb );
		a[1] = IDL_MIN( a[1], b[1] );
		a[0] = NOID;

		return 0;
	}

	if ( b[0] > 1 && ida > idb ) {
		/* swap the max slots so the larger ID ends up last */
		swap = idb;
		a[a[0]] = idb;
		b[b[0]] = ida;
	}

	if ( b[1] < a[1] ) {
		tmp = a[1];
		a[1] = b[1];
	} else {
		tmp = b[1];
	}
	a[0]++;
	a[a[0]] = tmp;

	if ( b[0] > 1 ) {
		int i = b[0] - 1;

		AC_MEMCPY(a+a[0]+1, b+2, i * sizeof(ID));
		a[0] += i;
	}
	if ( swap ) {
		b[b[0]] = swap;
	}
	return 0;
}
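/* The append helpers above keep only the end slots ordered; a caller that
 * needs a fully sorted IDL can run bdb_idl_sort() below afterwards, while
 * the min/max bookkeeping already suffices for turning an oversized result
 * into a range.
 */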
/* Quicksort + Insertion sort for small arrays */

#define SMALL	8
#define	SWAP(a,b)	itmp=(a);(a)=(b);(b)=itmp

void
bdb_idl_sort( ID *ids, ID *tmp )
{
	int *istack = (int *)tmp;
	int i,j,k,l,ir,jstack;
	ID a, itmp;

	if ( BDB_IDL_IS_RANGE( ids ))
		return;

	ir = ids[0];
	l = 1;
	jstack = 0;
	for(;;) {
		if (ir - l < SMALL) {	/* Insertion sort */
			for (j=l+1;j<=ir;j++) {
				a = ids[j];
				for (i=j-1;i>=1;i--) {
					if (ids[i] <= a) break;
					ids[i+1] = ids[i];
				}
				ids[i+1] = a;
			}
			if (jstack == 0) break;
			ir = istack[jstack--];
			l = istack[jstack--];
		} else {
			k = (l + ir) >> 1;	/* Choose median of left, center, right */
			SWAP(ids[k], ids[l+1]);
			if (ids[l] > ids[ir]) {
				SWAP(ids[l], ids[ir]);
			}
			if (ids[l+1] > ids[ir]) {
				SWAP(ids[l+1], ids[ir]);
			}
			if (ids[l] > ids[l+1]) {
				SWAP(ids[l], ids[l+1]);
			}
			i = l+1;
			j = ir;
			a = ids[l+1];
			for(;;) {
				do i++; while(ids[i] < a);
				do j--; while(ids[j] > a);
				if (j < i) break;
				SWAP(ids[i],ids[j]);
			}
			ids[l+1] = ids[j];
			ids[j] = a;
			jstack += 2;
			if (ir-i+1 >= j-l) {
				istack[jstack] = ir;
				istack[jstack-1] = i;
				ir = j-1;
			} else {
				istack[jstack] = j-1;
				istack[jstack-1] = l;
				l = i;
			}
		}
	}
}
/* 8 bit Radix sort + insertion sort
 *
 * based on code from http://www.cubic.org/docs/radix.htm
 * with improvements by mbackes@symas.com and hyc@symas.com
 *
 * This code is O(n) but has a relatively high constant factor. For lists
 * up to ~50 Quicksort is slightly faster; up to ~100 they are even.
 * Much faster than quicksort for lists longer than ~100. Insertion
 * sort is actually superior for lists <50.
 */

#define BUCKETS	(1<<8)
#define SMALL	50

void
bdb_idl_sort( ID *ids, ID *tmp )
{
	int count, soft_limit, phase = 0, size = ids[0];
	ID *idls[2];
	unsigned char *maxv = (unsigned char *)&ids[size];

	if ( BDB_IDL_IS_RANGE( ids ))
		return;

	/* Use insertion sort for small lists */
	if ( size <= SMALL ) {
		int i, j;
		ID a;

		for (j=1;j<=size;j++) {
			a = ids[j];
			for (i=j-1;i>=1;i--) {
				if (ids[i] <= a) break;
				ids[i+1] = ids[i];
			}
			ids[i+1] = a;
		}
		return;
	}

	tmp[0] = size;
	idls[0] = ids;
	idls[1] = tmp;

#if BYTE_ORDER == BIG_ENDIAN
	for (soft_limit = 0; !maxv[soft_limit]; soft_limit++);
#else
	for (soft_limit = sizeof(ID)-1; !maxv[soft_limit]; soft_limit--);
#endif

	for (
#if BYTE_ORDER == BIG_ENDIAN
		count = sizeof(ID)-1; count >= soft_limit; --count
#else
		count = 0; count <= soft_limit; ++count
#endif
	) {
		unsigned int num[BUCKETS], *np, n, sum;
		int i;
		ID *sp, *source, *dest;
		unsigned char *bp, *source_start;

		source = idls[phase]+1;
		dest = idls[phase^1]+1;
		source_start = ((unsigned char *) source) + count;

		np = num;
		for ( i = BUCKETS; i > 0; --i ) *np++ = 0;

		/* count occurences of every byte value */
		bp = source_start;
		for ( i = size; i > 0; --i, bp += sizeof(ID) )
			num[*bp]++;

		/* transform count into index by summing elements and storing
		 * into same array
		 */
		sum = 0;
		np = num;
		for ( i = BUCKETS; i > 0; --i ) {
			n = *np;
			*np++ = sum;
			sum += n;
		}

		/* fill dest with the right values in the right place */
		bp = source_start;
		sp = source;
		for ( i = size; i > 0; --i, bp += sizeof(ID) ) {
			np = num + *bp;
			dest[*np] = *sp++;
			++(*np);
		}
		phase ^= 1;
	}

	/* copy back from temp if needed */
	if ( phase ) {
		for ( count = 0; count < size; ++count )
			ids[count+1] = tmp[count+1];
	}
}
#endif	/* Quick vs Radix */

#endif	/* BDB_HIER */