/*	$NetBSD: hash.c,v 1.31 2009/02/12 06:35:54 lukem Exp $	*/

/*
 * Copyright (c) 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
__RCSID("$NetBSD: hash.c,v 1.31 2009/02/12 06:35:54 lukem Exp $");

#include "namespace.h"

#include <sys/param.h>
#include <sys/stat.h>

/* Headers required by the code below (stat/open/errno/stdio/string/etc.). */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <db.h>
#include "hash.h"
#include "page.h"
#include "extern.h"
static int	 alloc_segs(HTAB *, int);
static int	 flush_meta(HTAB *);
static int	 hash_access(HTAB *, ACTION, DBT *, DBT *);
static int	 hash_close(DB *);
static int	 hash_delete(const DB *, const DBT *, uint32_t);
static int	 hash_fd(const DB *);
static int	 hash_get(const DB *, const DBT *, DBT *, uint32_t);
static int	 hash_put(const DB *, DBT *, const DBT *, uint32_t);
static void	*hash_realloc(SEGMENT **, size_t, size_t);
static int	 hash_seq(const DB *, DBT *, DBT *, uint32_t);
static int	 hash_sync(const DB *, uint32_t);
static int	 hdestroy(HTAB *);
static HTAB	*init_hash(HTAB *, const char *, const HASHINFO *);
static int	 init_htab(HTAB *, size_t);
#if BYTE_ORDER == LITTLE_ENDIAN
static void	 swap_header(HTAB *);
static void	 swap_header_copy(HASHHDR *, HASHHDR *);
#endif
/* Fast arithmetic, relying on powers of 2. */
#define	MOD(x, y)		((x) & ((y) - 1))

#define	RETURN_ERROR(ERR, LOC)	{ save_errno = ERR; goto LOC; }
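/*
 * Added commentary: the MOD() shortcut above is only valid when y is a
 * power of two, because masking with (y - 1) keeps exactly the low-order
 * bits.  For example, MOD(13, 8) == (13 & 7) == 5, the same result as
 * 13 % 8 but computed without a division.  Bucket counts and segment sizes
 * in this package are kept as powers of two precisely so this cheap form
 * can be used everywhere.
 */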
#ifdef HASH_STATISTICS
int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
#endif

/************************** INTERFACE ROUTINES ***************************/
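/*
 * Usage sketch (added for illustration, not part of the library source).
 * These routines are normally reached through the generic dbopen(3) front
 * end, which dispatches to __hash_open() for DB_HASH databases; the file
 * name and key/data values below are hypothetical.
 */
#if 0
#include <db.h>
#include <fcntl.h>
#include <string.h>

static int
example(void)
{
	DB *db;
	DBT key, data;
	char kbuf[] = "greeting", dbuf[] = "hello";

	/* Create or open a hash database with default parameters. */
	db = dbopen("example.db", O_CREAT | O_RDWR, 0644, DB_HASH, NULL);
	if (db == NULL)
		return (-1);

	key.data = kbuf;
	key.size = sizeof(kbuf) - 1;
	data.data = dbuf;
	data.size = sizeof(dbuf) - 1;

	/* hash_put()/hash_get() are reached via the DB function pointers. */
	if ((*db->put)(db, &key, &data, 0) == -1 ||
	    (*db->get)(db, &key, &data, 0) == -1) {
		(void)(*db->close)(db);
		return (-1);
	}
	return ((*db->close)(db));
}
#endif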
__hash_open(const char *file, int flags, mode_t mode, const HASHINFO *info,

	int bpages, new_table, nsegs, save_errno;
	if ((flags & O_ACCMODE) == O_WRONLY) {
		errno = EINVAL;
		return (NULL);
	}

	if (!(hashp = calloc(1, sizeof(HTAB))))
		return (NULL);

	/*
	 * Even if user wants write only, we need to be able to read
	 * the actual file, so we need to open it read/write. But, the
	 * field in the hashp structure needs to be accurate so that
	 * we can check accesses.
	 */
	hashp->flags = flags;
	new_table = 0;
	if (!file || (flags & O_TRUNC) ||
	    (stat(file, &statbuf) && (errno == ENOENT))) {
		if (errno == ENOENT)
			errno = 0;	/* Just in case someone looks at errno */
		new_table = 1;
	}

	if (file) {
		if ((hashp->fp = open(file, flags, mode)) == -1)
			RETURN_ERROR(errno, error0);
		if (fcntl(hashp->fp, F_SETFD, FD_CLOEXEC) == -1)
			RETURN_ERROR(errno, error1);
		if (fstat(hashp->fp, &statbuf) == -1)
			RETURN_ERROR(errno, error1);
		new_table |= statbuf.st_size == 0;
	}

	if (new_table) {
		if (!(hashp = init_hash(hashp, file, info)))
			RETURN_ERROR(errno, error1);
	} else {
		/* Table already exists */
		if (info && info->hash)
			hashp->hash = info->hash;
		else
			hashp->hash = __default_hash;
		hdrsize = read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
#if BYTE_ORDER == LITTLE_ENDIAN
		swap_header(hashp);
#endif
		if (hdrsize == -1)
			RETURN_ERROR(errno, error1);
		if (hdrsize != sizeof(HASHHDR))
			RETURN_ERROR(EFTYPE, error1);
		/* Verify file type, versions and hash function */
		if (hashp->MAGIC != HASHMAGIC)
			RETURN_ERROR(EFTYPE, error1);
#define	OLDHASHVERSION	1
		if (hashp->VERSION != HASHVERSION &&
		    hashp->VERSION != OLDHASHVERSION)
			RETURN_ERROR(EFTYPE, error1);
		if (hashp->hash(CHARKEY, sizeof(CHARKEY)) !=
		    (uint32_t)hashp->H_CHARKEY)
			RETURN_ERROR(EFTYPE, error1);
		/*
		 * Figure out how many segments we need.  Max_Bucket is the
		 * maximum bucket number, so the number of buckets is
		 * max_bucket + 1.
		 */
		nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
		    hashp->SGSIZE;
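		/*
		 * Worked example (added commentary): with a segment size of
		 * 256 buckets (the default), a table whose MAX_BUCKET is 300
		 * holds 301 buckets and needs (301 + 256 - 1) / 256 == 2
		 * segments; the "+ SGSIZE - 1" term is the usual integer
		 * ceiling-division trick.
		 */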
		if (alloc_segs(hashp, nsegs))
			/*
			 * If alloc_segs fails, table will have been destroyed
			 * and errno will have been set.
			 */
			return (NULL);
		/* Read in bitmaps */
		bpages = (hashp->SPARES[hashp->OVFL_POINT] +
		    (unsigned int)(hashp->BSIZE << BYTE_SHIFT) - 1) >>
		    (hashp->BSHIFT + BYTE_SHIFT);

		hashp->nmaps = bpages;
		(void)memset(&hashp->mapp[0], 0, bpages * sizeof(uint32_t *));
	}
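	/*
	 * Added commentary: SPARES[OVFL_POINT] counts the overflow pages in
	 * use at the current split point, and each bitmap page tracks one
	 * bit per overflow page, i.e. BSIZE << BYTE_SHIFT bits per map.
	 * The shift above is therefore a ceiling division: with 4096-byte
	 * pages one bitmap page covers 32768 overflow pages, so up to 32768
	 * of them require exactly one map.
	 */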
	/* Initialize Buffer Manager */
	if (info && info->cachesize)
		__buf_init(hashp, info->cachesize);
	else
		__buf_init(hashp, DEF_BUFSIZE);

	hashp->new_file = new_table;
	hashp->save_file = file && (hashp->flags & O_RDWR);
	if (!(dbp = malloc(sizeof(DB)))) {
		save_errno = errno;
		hdestroy(hashp);
		errno = save_errno;
		return (NULL);
	}
	dbp->internal = hashp;
	dbp->close = hash_close;
	dbp->del = hash_delete;
	dbp->fd = hash_fd;
	dbp->get = hash_get;
	dbp->put = hash_put;
	dbp->seq = hash_seq;
	dbp->sync = hash_sync;
#ifdef DEBUG
	(void)fprintf(stderr,
"%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
	    "TABLE POINTER   ", hashp,
	    "BUCKET SIZE     ", hashp->BSIZE,
	    "BUCKET SHIFT    ", hashp->BSHIFT,
	    "DIRECTORY SIZE  ", hashp->DSIZE,
	    "SEGMENT SIZE    ", hashp->SGSIZE,
	    "SEGMENT SHIFT   ", hashp->SSHIFT,
	    "FILL FACTOR     ", hashp->FFACTOR,
	    "MAX BUCKET      ", hashp->MAX_BUCKET,
	    "OVFL POINT      ", hashp->OVFL_POINT,
	    "LAST FREED      ", hashp->LAST_FREED,
	    "HIGH MASK       ", hashp->HIGH_MASK,
	    "LOW MASK        ", hashp->LOW_MASK,
	    "NSEGS           ", hashp->nsegs,
	    "NKEYS           ", hashp->NKEYS);
#endif
#ifdef HASH_STATISTICS
	hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
#endif
	return (dbp);

error1:
	(void)close(hashp->fp);
static int
hash_close(DB *dbp)
{
	hashp = dbp->internal;
	retval = hdestroy(hashp);
	free(dbp);
	return (retval);
}
static int
hash_fd(const DB *dbp)
{
	hashp = dbp->internal;
	if (hashp->fp == -1) {
		errno = ENOENT;
		return (-1);
	}
	return (hashp->fp);
}
/************************** LOCAL CREATION ROUTINES **********************/

static HTAB *
init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
{
	hashp->LORDER = BYTE_ORDER;
	hashp->BSIZE = DEF_BUCKET_SIZE;
	hashp->BSHIFT = DEF_BUCKET_SHIFT;
	hashp->SGSIZE = DEF_SEGSIZE;
	hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
	hashp->DSIZE = DEF_DIRSIZE;
	hashp->FFACTOR = DEF_FFACTOR;
	hashp->hash = __default_hash;
	memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
	memset(hashp->BITMAPS, 0, sizeof(hashp->BITMAPS));
	/* Fix bucket size to be optimal for file system */
	if (stat(file, &statbuf))
		return (NULL);
	if (statbuf.st_blksize == 0) {
		/*
		 * 0 in 2 cases: upgrade from old to new struct stat or
		 * there is a bug in the underlying fs.
		 */
		hashp->BSIZE = MIN(MINIX_ST_BLKSIZE, MAX_BSIZE);
	} else
		hashp->BSIZE = MIN(statbuf.st_blksize, MAX_BSIZE);
	hashp->BSHIFT = __log2((uint32_t)hashp->BSIZE);
		/* Round pagesize up to power of 2 */
		hashp->BSHIFT = __log2(info->bsize);
		hashp->BSIZE = 1 << hashp->BSHIFT;
		if (hashp->BSIZE > MAX_BSIZE) {
			errno = EINVAL;
			return (NULL);
		}
		if (info->ffactor)
			hashp->FFACTOR = info->ffactor;
		if (info->hash)
			hashp->hash = info->hash;
		if (info->lorder != BIG_ENDIAN &&
		    info->lorder != LITTLE_ENDIAN) {
			errno = EINVAL;
			return (NULL);
		}
		hashp->LORDER = info->lorder;
	/* init_htab should destroy the table and set errno if it fails */
	if (init_htab(hashp, (size_t)nelem))
		return (NULL);
	else
		return (hashp);
}

/*
 * This calls alloc_segs which may run out of memory.  Alloc_segs will destroy
 * the table and set errno, so we just pass the error information along.
 *
 * Returns 0 on No Error
 */
static int
init_htab(HTAB *hashp, size_t nelem)
{
	/*
	 * Divide number of elements by the fill factor and determine a
	 * desired number of buckets.  Allocate space for the next greater
	 * power of two number of buckets.
	 */
	nelem = (nelem - 1) / hashp->FFACTOR + 1;

	_DBFIT(nelem, uint32_t);
	l2 = __log2(MAX((uint32_t)nelem, 2));
	nbuckets = (uint32_t)1 << l2;
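	/*
	 * Worked example (added commentary): with nelem = 1000 and a fill
	 * factor of 8 (set through HASHINFO.ffactor), the code above asks
	 * for (1000 - 1) / 8 + 1 = 125 buckets; __log2() rounds up to the
	 * next power of two, so l2 = 7 and the table starts with
	 * nbuckets = 1 << 7 = 128 buckets.
	 */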
	hashp->SPARES[l2] = l2 + 1;
	hashp->SPARES[l2 + 1] = l2 + 1;
	hashp->OVFL_POINT = l2;
	hashp->LAST_FREED = 2;
	/* First bitmap page is at: splitpoint l2 page offset 1 */
	if (__ibitmap(hashp, (int)OADDR_OF(l2, 1), l2 + 1, 0))
		return (-1);
	hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
	hashp->HIGH_MASK = (nbuckets << 1) - 1;
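	/*
	 * Worked example (added commentary): for a table starting with
	 * nbuckets = 128, MAX_BUCKET = LOW_MASK = 127 (0x7f) and
	 * HIGH_MASK = 255 (0xff).  __call_hash() below masks a hash value
	 * with HIGH_MASK first and falls back to LOW_MASK when the result
	 * exceeds MAX_BUCKET, which is the standard linear-hashing address
	 * calculation.
	 */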
	/* LINTED constant in conditional context */
	hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
	    hashp->BSHIFT) + 1;
	nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
	nsegs = 1 << __log2(nsegs);

	if (nsegs > (uint32_t)hashp->DSIZE)
		hashp->DSIZE = nsegs;
	return (alloc_segs(hashp, (int)nsegs));
}
/********************** DESTROY/CLOSE ROUTINES ************************/

/*
 * Flushes any changes to the file if necessary and destroys the hashp
 * structure, freeing all allocated space.
 */
static int
hdestroy(HTAB *hashp)
{
#ifdef HASH_STATISTICS
	(void)fprintf(stderr, "hdestroy: accesses %d collisions %d\n",
	    hash_accesses, hash_collisions);
	(void)fprintf(stderr, "hdestroy: expansions %d\n",
	    hash_expansions);
	(void)fprintf(stderr, "hdestroy: overflows %d\n",
	    hash_overflows);
	(void)fprintf(stderr, "keys %d maxp %d segmentcount %d\n",
	    hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);

	for (i = 0; i < NCACHED; i++)
		(void)fprintf(stderr,
		    "spares[%d] = %d\n", i, hashp->SPARES[i]);
#endif
	/*
	 * Call on buffer manager to free buffers, and if required,
	 * write them to disk.
	 */
	if (__buf_free(hashp, 1, hashp->save_file))
		save_errno = errno;
	free(*hashp->dir);		/* Free initial segments */
	/* Free extra segments */
	while (hashp->exsegs--)
		free(hashp->dir[--hashp->nsegs]);
	if (flush_meta(hashp) && !save_errno)
		save_errno = errno;

	for (i = 0; i < hashp->nmaps; i++)
		if (hashp->mapp[i])
			free(hashp->mapp[i]);

	(void)close(hashp->fp);
/*
 * Write modified pages to disk
 */
static int
hash_sync(const DB *dbp, uint32_t flags)
{
	hashp = dbp->internal;
	if (!hashp->save_file)
		return (0);
	if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
		return (ERROR);
	return (0);
}
/*
 * -1 indicates that errno should be set
 */
static int
flush_meta(HTAB *hashp)
{
#if BYTE_ORDER == LITTLE_ENDIAN
	HASHHDR whdr;
#endif

	if (!hashp->save_file)
		return (0);
	hashp->MAGIC = HASHMAGIC;
	hashp->VERSION = HASHVERSION;
	hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));

	fp = hashp->fp;
	whdrp = &hashp->hdr;
#if BYTE_ORDER == LITTLE_ENDIAN
	whdrp = &whdr;
	swap_header_copy(&hashp->hdr, whdrp);
#endif
	if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1)
		return (-1);
	if (wsize != sizeof(HASHHDR)) {
		errno = EFTYPE;
		hashp->err = EFTYPE;
		return (-1);
	}
	for (i = 0; i < NCACHED; i++)
		if (hashp->mapp[i])
			if (__put_page(hashp, (char *)(void *)hashp->mapp[i],
			    (u_int)hashp->BITMAPS[i], 0, 1))
				return (-1);
	return (0);
}
/******************************* SEARCH ROUTINES *****************************/

/*
 * All the access routines return
 *	 0 on SUCCESS
 *	 1 to indicate an external ERROR (i.e. key not found, etc)
 *	-1 to indicate an internal ERROR (i.e. out of memory, etc)
 */
static int
hash_get(const DB *dbp, const DBT *key, DBT *data, uint32_t flag)
{
	hashp = dbp->internal;
	if (flag) {
		hashp->err = errno = EINVAL;
		return (ERROR);
	}
	return (hash_access(hashp, HASH_GET, __UNCONST(key), data));
}
static int
hash_put(const DB *dbp, DBT *key, const DBT *data, uint32_t flag)
{
	hashp = dbp->internal;
	if (flag && flag != R_NOOVERWRITE) {
		hashp->err = errno = EINVAL;
		return (ERROR);
	}
	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
		hashp->err = errno = EPERM;
		return (ERROR);
	}
	/* LINTED const castaway */
	return (hash_access(hashp, flag == R_NOOVERWRITE ?
	    HASH_PUTNEW : HASH_PUT, __UNCONST(key), __UNCONST(data)));
}
static int
hash_delete(const DB *dbp, const DBT *key, uint32_t flag)
{
	hashp = dbp->internal;
	if (flag && flag != R_CURSOR) {
		hashp->err = errno = EINVAL;
		return (ERROR);
	}
	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
		hashp->err = errno = EPERM;
		return (ERROR);
	}
	return hash_access(hashp, HASH_DELETE, __UNCONST(key), NULL);
}
/*
 * Assume that hashp has been set in wrapper routine.
 */
static int
hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val)
{
	BUFHEAD *bufp, *save_bufp;

#ifdef HASH_STATISTICS
	hash_accesses++;
#endif

	kp = (char *)key->data;
	rbufp = __get_buf(hashp, __call_hash(hashp, kp, (int)size), NULL, 0);
	if (!rbufp)
		return (ERROR);
	save_bufp = rbufp;
	/* Pin the bucket chain */
	rbufp->flags |= BUF_PIN;
	for (bp = (uint16_t *)(void *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
		if (bp[1] >= REAL_KEY) {
			/* Real key/data pair */
			if (size == (size_t)(off - *bp) &&
			    memcmp(kp, rbufp->page + *bp, size) == 0)
				goto found;
			off = bp[1];
#ifdef HASH_STATISTICS
			hash_collisions++;
#endif
			bp += 2;
			ndx += 2;
		} else if (bp[1] == OVFLPAGE) {
			rbufp = __get_buf(hashp, (uint32_t)*bp, rbufp, 0);
			if (!rbufp) {
				save_bufp->flags &= ~BUF_PIN;
				return (ERROR);
			}
			/* FOR LOOP INIT */
			bp = (uint16_t *)(void *)rbufp->page;
			n = *bp++;
			ndx = 1;
			off = hashp->BSIZE;
		} else if (bp[1] < REAL_KEY) {
			if ((ndx =
			    __find_bigpair(hashp, rbufp, ndx, kp, (int)size)) > 0)
				goto found;
			if (ndx == -2) {
				bufp = rbufp;
				if (!(pageno =
				    __find_last_page(hashp, &bufp))) {
					ndx = 0;
					break;	/* FOR */
				}
				rbufp = __get_buf(hashp, (uint32_t)pageno,
				    bufp, 0);
				if (!rbufp) {
					save_bufp->flags &= ~BUF_PIN;
					return (ERROR);
				}
				/* FOR LOOP INIT */
				bp = (uint16_t *)(void *)rbufp->page;
				n = *bp++;
				ndx = 1;
				off = hashp->BSIZE;
			} else {
				save_bufp->flags &= ~BUF_PIN;
				return (ERROR);
			}
		}
	/* Not found */
	switch (action) {
	case HASH_PUT:
	case HASH_PUTNEW:
		if (__addel(hashp, rbufp, key, val)) {
			save_bufp->flags &= ~BUF_PIN;
			return (ERROR);
		} else {
			save_bufp->flags &= ~BUF_PIN;
			return (SUCCESS);
		}
	case HASH_GET:
	case HASH_DELETE:
	default:
		save_bufp->flags &= ~BUF_PIN;
		return (ABNORMAL);
	}

found:
	switch (action) {
	case HASH_PUTNEW:
		save_bufp->flags &= ~BUF_PIN;
		return (ABNORMAL);
	case HASH_GET:
		bp = (uint16_t *)(void *)rbufp->page;
		if (bp[ndx + 1] < REAL_KEY) {
			if (__big_return(hashp, rbufp, ndx, val, 0))
				return (ERROR);
		} else {
			val->data = (uint8_t *)rbufp->page + (int)bp[ndx + 1];
			val->size = bp[ndx] - bp[ndx + 1];
		}
		break;
	case HASH_PUT:
		if ((__delpair(hashp, rbufp, ndx)) ||
		    (__addel(hashp, rbufp, key, val))) {
			save_bufp->flags &= ~BUF_PIN;
			return (ERROR);
		}
		break;
	case HASH_DELETE:
		if (__delpair(hashp, rbufp, ndx))
			return (ERROR);
		break;
	}
	save_bufp->flags &= ~BUF_PIN;
	return (SUCCESS);
}
static int
hash_seq(const DB *dbp, DBT *key, DBT *data, uint32_t flag)
{
	BUFHEAD *bufp = NULL;	/* XXX: gcc */

	hashp = dbp->internal;
	if (flag && flag != R_FIRST && flag != R_NEXT) {
		hashp->err = errno = EINVAL;
		return (ERROR);
	}
#ifdef HASH_STATISTICS
	hash_accesses++;
#endif
	if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
		hashp->cbucket = 0;
		hashp->cndx = 1;
		hashp->cpage = NULL;
	}

	for (bp = NULL; !bp || !bp[0];) {
		if (!(bufp = hashp->cpage)) {
			for (bucket = hashp->cbucket;
			    bucket <= (uint32_t)hashp->MAX_BUCKET;
			    bucket++, hashp->cndx = 1) {
				bufp = __get_buf(hashp, bucket, NULL, 0);
				if (!bufp)
					return (ERROR);
				hashp->cpage = bufp;
				bp = (uint16_t *)(void *)bufp->page;
				if (bp[0])
					break;
			}
			hashp->cbucket = bucket;
			if (hashp->cbucket > hashp->MAX_BUCKET) {
				hashp->cbucket = -1;
				return (ABNORMAL);
			}
		} else
			bp = (uint16_t *)(void *)hashp->cpage->page;

		_DIAGASSERT(bp != NULL);
		_DIAGASSERT(bufp != NULL);
		while (bp[hashp->cndx + 1] == OVFLPAGE) {
			bufp = hashp->cpage =
			    __get_buf(hashp, (uint32_t)bp[hashp->cndx], bufp,
			    0);
			if (!bufp)
				return (ERROR);
			bp = (uint16_t *)(void *)(bufp->page);
			hashp->cndx = 1;
		}
	}

	ndx = hashp->cndx;
	if (bp[ndx + 1] < REAL_KEY) {
		if (__big_keydata(hashp, bufp, key, data, 1))
			return (ERROR);
	} else {
		if (hashp->cpage == NULL)
			return (ERROR);
		key->data = (uint8_t *)hashp->cpage->page + bp[ndx];
		key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
		data->data = (uint8_t *)hashp->cpage->page + bp[ndx + 1];
		data->size = bp[ndx] - bp[ndx + 1];
	}
	return (SUCCESS);
}
/********************************* UTILITIES ************************/

int
__expand_table(HTAB *hashp)
{
	uint32_t old_bucket, new_bucket;
	int new_segnum, spare_ndx;
	size_t dirsize;

#ifdef HASH_STATISTICS
	hash_expansions++;
#endif
	new_bucket = ++hashp->MAX_BUCKET;
	old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);

	new_segnum = new_bucket >> hashp->SSHIFT;

	/* Check if we need a new segment */
	if (new_segnum >= hashp->nsegs) {
		/* Check if we need to expand directory */
		if (new_segnum >= hashp->DSIZE) {
			/* Reallocate directory */
			dirsize = hashp->DSIZE * sizeof(SEGMENT *);
			if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
				return (-1);
			hashp->DSIZE = dirsize << 1;
		}
		if ((hashp->dir[new_segnum] =
		    calloc((size_t)hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
			return (-1);
	}

	/*
	 * If the split point is increasing (MAX_BUCKET's log base 2
	 * increases), we need to copy the current contents of the spare
	 * split bucket to the next bucket.
	 */
	spare_ndx = __log2((uint32_t)(hashp->MAX_BUCKET + 1));
	if (spare_ndx > hashp->OVFL_POINT) {
		hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
		hashp->OVFL_POINT = spare_ndx;
	}

	if (new_bucket > (uint32_t)hashp->HIGH_MASK) {
		/* Starting a new doubling */
		hashp->LOW_MASK = hashp->HIGH_MASK;
		hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
	}
	/* Relocate records to the new bucket */
	return (__split_page(hashp, old_bucket, new_bucket));
}
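/*
 * Worked example (added commentary): with MAX_BUCKET = 7, LOW_MASK = 7 and
 * HIGH_MASK = 15, the next expansion makes new_bucket = 8 and splits
 * old_bucket = 8 & 7 = 0, moving roughly half of bucket 0's records into
 * bucket 8.  Buckets 1..7 split the same way on later expansions; only
 * when new_bucket reaches 16 (greater than HIGH_MASK) does a new doubling
 * start, with LOW_MASK = 15 and HIGH_MASK = 31.
 */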
/*
 * If realloc guarantees that the pointer is not destroyed if the realloc
 * fails, then this routine can go away.
 */
static void *
hash_realloc(SEGMENT **p_ptr, size_t oldsize, size_t newsize)
{
	void *p;

	if ((p = malloc(newsize)) != NULL) {
		memmove(p, *p_ptr, oldsize);
		memset((char *)p + oldsize, 0, newsize - oldsize);
		free(*p_ptr);
		*p_ptr = p;
	}
	return (p);
}
uint32_t
__call_hash(HTAB *hashp, char *k, int len)
{
	uint32_t n, bucket;

	n = hashp->hash(k, (size_t)len);
	bucket = n & hashp->HIGH_MASK;
	if (bucket > hashp->MAX_BUCKET)
		bucket = bucket & hashp->LOW_MASK;
	return (bucket);
}
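/*
 * Worked example (added commentary): with MAX_BUCKET = 9, LOW_MASK = 7 and
 * HIGH_MASK = 15, a key hashing to n = 27 first maps to 27 & 15 = 11;
 * since bucket 11 does not exist yet (11 > MAX_BUCKET), the low mask is
 * applied instead and the key lands in 27 & 7 = 3, the bucket that will
 * later split to populate bucket 11.
 */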
/*
 * Allocate segment table.  On error, destroy the table and set errno.
 *
 * Returns 0 on success
 */
static int
alloc_segs(HTAB *hashp, int nsegs)
{
	hashp->dir = calloc((size_t)hashp->DSIZE, sizeof(SEGMENT *));
	if (hashp->dir == NULL) {
		save_errno = errno;
		(void)hdestroy(hashp);
		errno = save_errno;
		return (-1);
	}
	hashp->nsegs = nsegs;
	/* Allocate segments */
	store = calloc((size_t)(nsegs << hashp->SSHIFT), sizeof(SEGMENT));
	if (store == NULL) {
		save_errno = errno;
		(void)hdestroy(hashp);
		errno = save_errno;
		return (-1);
	}
	for (i = 0; i < nsegs; i++)
		hashp->dir[i] = &store[i << hashp->SSHIFT];
	return (0);
}
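/*
 * Added commentary: the directory is a two-level structure, so bucket b is
 * found at dir[b >> SSHIFT][b & (SGSIZE - 1)].  With the default segment
 * size of 256, bucket 700 is entry 188 of segment 2 (700 >> 8 == 2,
 * 700 & 255 == 188); alloc_segs() carves all initial segments out of one
 * calloc'd block and points each dir[] slot at its own slice.
 */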
#if BYTE_ORDER == LITTLE_ENDIAN
/*
 * Hashp->hdr needs to be byteswapped.
 */
static void
swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
{
	P_32_COPY(srcp->magic, destp->magic);
	P_32_COPY(srcp->version, destp->version);
	P_32_COPY(srcp->lorder, destp->lorder);
	P_32_COPY(srcp->bsize, destp->bsize);
	P_32_COPY(srcp->bshift, destp->bshift);
	P_32_COPY(srcp->dsize, destp->dsize);
	P_32_COPY(srcp->ssize, destp->ssize);
	P_32_COPY(srcp->sshift, destp->sshift);
	P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
	P_32_COPY(srcp->last_freed, destp->last_freed);
	P_32_COPY(srcp->max_bucket, destp->max_bucket);
	P_32_COPY(srcp->high_mask, destp->high_mask);
	P_32_COPY(srcp->low_mask, destp->low_mask);
	P_32_COPY(srcp->ffactor, destp->ffactor);
	P_32_COPY(srcp->nkeys, destp->nkeys);
	P_32_COPY(srcp->hdrpages, destp->hdrpages);
	P_32_COPY(srcp->h_charkey, destp->h_charkey);
	for (i = 0; i < NCACHED; i++) {
		P_32_COPY(srcp->spares[i], destp->spares[i]);
		P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
	}
}
static void
swap_header(HTAB *hashp)
{
	hdrp = &hashp->hdr;

	M_32_SWAP(hdrp->magic);
	M_32_SWAP(hdrp->version);
	M_32_SWAP(hdrp->lorder);
	M_32_SWAP(hdrp->bsize);
	M_32_SWAP(hdrp->bshift);
	M_32_SWAP(hdrp->dsize);
	M_32_SWAP(hdrp->ssize);
	M_32_SWAP(hdrp->sshift);
	M_32_SWAP(hdrp->ovfl_point);
	M_32_SWAP(hdrp->last_freed);
	M_32_SWAP(hdrp->max_bucket);
	M_32_SWAP(hdrp->high_mask);
	M_32_SWAP(hdrp->low_mask);
	M_32_SWAP(hdrp->ffactor);
	M_32_SWAP(hdrp->nkeys);
	M_32_SWAP(hdrp->hdrpages);
	M_32_SWAP(hdrp->h_charkey);
	for (i = 0; i < NCACHED; i++) {
		M_32_SWAP(hdrp->spares[i]);
		M_16_SWAP(hdrp->bitmaps[i]);
	}
}
#endif