1 /* $NetBSD: hash.c,v 1.35 2015/06/22 21:16:02 christos Exp $ */
4 * Copyright (c) 1990, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
__RCSID("$NetBSD: hash.c,v 1.35 2015/06/22 21:16:02 christos Exp $");

#include "namespace.h"
#include <sys/param.h>
#include <sys/stat.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>

#include <db.h>
#include "hash.h"
#include "page.h"
#include "extern.h"
59 static int alloc_segs(HTAB
*, int);
60 static int flush_meta(HTAB
*);
61 static int hash_access(HTAB
*, ACTION
, DBT
*, DBT
*);
62 static int hash_close(DB
*);
63 static int hash_delete(const DB
*, const DBT
*, uint32_t);
64 static int hash_fd(const DB
*);
65 static int hash_get(const DB
*, const DBT
*, DBT
*, uint32_t);
66 static int hash_put(const DB
*, DBT
*, const DBT
*, uint32_t);
67 static void *hash_realloc(SEGMENT
**, size_t, size_t);
68 static int hash_seq(const DB
*, DBT
*, DBT
*, uint32_t);
69 static int hash_sync(const DB
*, uint32_t);
70 static int hdestroy(HTAB
*);
71 static HTAB
*init_hash(HTAB
*, const char *, const HASHINFO
*);
72 static int init_htab(HTAB
*, size_t);
73 #if BYTE_ORDER == LITTLE_ENDIAN
74 static void swap_header(HTAB
*);
75 static void swap_header_copy(HASHHDR
*, HASHHDR
*);
78 /* Fast arithmetic, relying on powers of 2, */
79 #define MOD(x, y) ((x) & ((y) - 1))
81 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
88 #ifdef HASH_STATISTICS
89 int hash_accesses
, hash_collisions
, hash_expansions
, hash_overflows
;
/************************** INTERFACE ROUTINES ***************************/
97 __hash_open(const char *file
, int flags
, mode_t mode
, const HASHINFO
*info
,
103 int bpages
, new_table
, nsegs
, save_errno
;
106 if ((flags
& O_ACCMODE
) == O_WRONLY
) {
111 if (!(hashp
= calloc(1, sizeof(HTAB
))))
116 * Even if user wants write only, we need to be able to read
117 * the actual file, so we need to open it read/write. But, the
118 * field in the hashp structure needs to be accurate so that
119 * we can check accesses.
121 hashp
->flags
= flags
;
124 if (!file
|| (flags
& O_TRUNC
) ||
125 (stat(file
, &statbuf
) && (errno
== ENOENT
))) {
127 errno
= 0; /* Just in case someone looks at errno */
131 if ((hashp
->fp
= __dbopen(file
, flags
, mode
, &statbuf
)) == -1)
132 RETURN_ERROR(errno
, error0
);
133 new_table
|= statbuf
.st_size
== 0;
136 if (!(hashp
= init_hash(hashp
, file
, info
)))
137 RETURN_ERROR(errno
, error1
);
139 /* Table already exists */
140 if (info
&& info
->hash
)
141 hashp
->hash
= info
->hash
;
143 hashp
->hash
= __default_hash
;
145 hdrsize
= read(hashp
->fp
, &hashp
->hdr
, sizeof(HASHHDR
));
146 #if BYTE_ORDER == LITTLE_ENDIAN
150 RETURN_ERROR(errno
, error1
);
151 if (hdrsize
!= sizeof(HASHHDR
))
152 RETURN_ERROR(EFTYPE
, error1
);
153 /* Verify file type, versions and hash function */
154 if (hashp
->MAGIC
!= HASHMAGIC
)
155 RETURN_ERROR(EFTYPE
, error1
);
156 #define OLDHASHVERSION 1
157 if (hashp
->VERSION
!= HASHVERSION
&&
158 hashp
->VERSION
!= OLDHASHVERSION
)
159 RETURN_ERROR(EFTYPE
, error1
);
160 if (hashp
->hash(CHARKEY
, sizeof(CHARKEY
)) !=
161 (uint32_t)hashp
->H_CHARKEY
)
162 RETURN_ERROR(EFTYPE
, error1
);
164 * Figure out how many segments we need. Max_Bucket is the
165 * maximum bucket number, so the number of buckets is
168 nsegs
= (hashp
->MAX_BUCKET
+ 1 + hashp
->SGSIZE
- 1) /
171 if (alloc_segs(hashp
, nsegs
))
173 * If alloc_segs fails, table will have been destroyed
174 * and errno will have been set.
177 /* Read in bitmaps */
178 bpages
= (hashp
->SPARES
[hashp
->OVFL_POINT
] +
179 (unsigned int)(hashp
->BSIZE
<< BYTE_SHIFT
) - 1) >>
180 (hashp
->BSHIFT
+ BYTE_SHIFT
);
182 hashp
->nmaps
= bpages
;
183 (void)memset(&hashp
->mapp
[0], 0, bpages
* sizeof(uint32_t *));
186 /* Initialize Buffer Manager */
187 if (info
&& info
->cachesize
)
188 __buf_init(hashp
, info
->cachesize
);
190 __buf_init(hashp
, DEF_BUFSIZE
);
192 hashp
->new_file
= new_table
;
193 hashp
->save_file
= file
&& (hashp
->flags
& O_RDWR
);
195 if (!(dbp
= malloc(sizeof(*dbp
)))) {
201 dbp
->internal
= hashp
;
202 dbp
->close
= hash_close
;
203 dbp
->del
= hash_delete
;
208 dbp
->sync
= hash_sync
;
212 (void)fprintf(stderr
,
213 "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
215 "TABLE POINTER ", hashp
,
216 "BUCKET SIZE ", hashp
->BSIZE
,
217 "BUCKET SHIFT ", hashp
->BSHIFT
,
218 "DIRECTORY SIZE ", hashp
->DSIZE
,
219 "SEGMENT SIZE ", hashp
->SGSIZE
,
220 "SEGMENT SHIFT ", hashp
->SSHIFT
,
221 "FILL FACTOR ", hashp
->FFACTOR
,
222 "MAX BUCKET ", hashp
->MAX_BUCKET
,
223 "OVFL POINT ", hashp
->OVFL_POINT
,
224 "LAST FREED ", hashp
->LAST_FREED
,
225 "HIGH MASK ", hashp
->HIGH_MASK
,
226 "LOW MASK ", hashp
->LOW_MASK
,
227 "NSEGS ", hashp
->nsegs
,
228 "NKEYS ", hashp
->NKEYS
);
230 #ifdef HASH_STATISTICS
231 hash_overflows
= hash_accesses
= hash_collisions
= hash_expansions
= 0;
237 (void)close(hashp
->fp
);
254 hashp
= dbp
->internal
;
255 retval
= hdestroy(hashp
);
261 hash_fd(const DB
*dbp
)
268 hashp
= dbp
->internal
;
269 if (hashp
->fp
== -1) {
/************************** LOCAL CREATION ROUTINES **********************/
278 init_hash(HTAB
*hashp
, const char *file
, const HASHINFO
*info
)
285 hashp
->LORDER
= BYTE_ORDER
;
286 hashp
->BSIZE
= DEF_BUCKET_SIZE
;
287 hashp
->BSHIFT
= DEF_BUCKET_SHIFT
;
288 hashp
->SGSIZE
= DEF_SEGSIZE
;
289 hashp
->SSHIFT
= DEF_SEGSIZE_SHIFT
;
290 hashp
->DSIZE
= DEF_DIRSIZE
;
291 hashp
->FFACTOR
= DEF_FFACTOR
;
292 hashp
->hash
= __default_hash
;
293 memset(hashp
->SPARES
, 0, sizeof(hashp
->SPARES
));
294 memset(hashp
->BITMAPS
, 0, sizeof (hashp
->BITMAPS
));
296 /* Fix bucket size to be optimal for file system */
298 if (stat(file
, &statbuf
))
300 hashp
->BSIZE
= MIN(statbuf
.st_blksize
, MAX_BSIZE
);
301 hashp
->BSHIFT
= __log2((uint32_t)hashp
->BSIZE
);
306 /* Round pagesize up to power of 2 */
307 hashp
->BSHIFT
= __log2(info
->bsize
);
308 hashp
->BSIZE
= 1 << hashp
->BSHIFT
;
309 if (hashp
->BSIZE
> MAX_BSIZE
) {
315 hashp
->FFACTOR
= info
->ffactor
;
317 hashp
->hash
= info
->hash
;
321 if (info
->lorder
!= BIG_ENDIAN
&&
322 info
->lorder
!= LITTLE_ENDIAN
) {
326 hashp
->LORDER
= info
->lorder
;
329 /* init_htab should destroy the table and set errno if it fails */
330 if (init_htab(hashp
, (size_t)nelem
))
336 * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
337 * the table and set errno, so we just pass the error information along.
339 * Returns 0 on No Error
342 init_htab(HTAB
*hashp
, size_t nelem
)
349 * Divide number of elements by the fill factor and determine a
350 * desired number of buckets. Allocate space for the next greater
351 * power of two number of buckets.
353 nelem
= (nelem
- 1) / hashp
->FFACTOR
+ 1;
355 _DBFIT(nelem
, uint32_t);
356 l2
= __log2(MAX((uint32_t)nelem
, 2));
359 hashp
->SPARES
[l2
] = l2
+ 1;
360 hashp
->SPARES
[l2
+ 1] = l2
+ 1;
361 hashp
->OVFL_POINT
= l2
;
362 hashp
->LAST_FREED
= 2;
364 /* First bitmap page is at: splitpoint l2 page offset 1 */
365 if (__ibitmap(hashp
, (int)OADDR_OF(l2
, 1), l2
+ 1, 0))
368 hashp
->MAX_BUCKET
= hashp
->LOW_MASK
= nbuckets
- 1;
369 hashp
->HIGH_MASK
= (nbuckets
<< 1) - 1;
370 /* LINTED constant in conditional context */
371 hashp
->HDRPAGES
= ((MAX(sizeof(HASHHDR
), MINHDRSIZE
) - 1) >>
374 nsegs
= (nbuckets
- 1) / hashp
->SGSIZE
+ 1;
375 nsegs
= 1 << __log2(nsegs
);
377 if (nsegs
> (uint32_t)hashp
->DSIZE
)
378 hashp
->DSIZE
= nsegs
;
379 return (alloc_segs(hashp
, (int)nsegs
));
/********************** DESTROY/CLOSE ROUTINES ************************/
385 * Flushes any changes to the file if necessary and destroys the hashp
386 * structure, freeing all allocated space.
389 hdestroy(HTAB
*hashp
)
395 #ifdef HASH_STATISTICS
396 (void)fprintf(stderr
, "hdestroy: accesses %d collisions %d\n",
397 hash_accesses
, hash_collisions
);
398 (void)fprintf(stderr
, "hdestroy: expansions %d\n",
400 (void)fprintf(stderr
, "hdestroy: overflows %d\n",
402 (void)fprintf(stderr
, "keys %d maxp %d segmentcount %d\n",
403 hashp
->NKEYS
, hashp
->MAX_BUCKET
, hashp
->nsegs
);
405 for (i
= 0; i
< NCACHED
; i
++)
406 (void)fprintf(stderr
,
407 "spares[%d] = %d\n", i
, hashp
->SPARES
[i
]);
410 * Call on buffer manager to free buffers, and if required,
411 * write them to disk.
413 if (__buf_free(hashp
, 1, hashp
->save_file
))
416 free(*hashp
->dir
); /* Free initial segments */
417 /* Free extra segments */
418 while (hashp
->exsegs
--)
419 free(hashp
->dir
[--hashp
->nsegs
]);
422 if (flush_meta(hashp
) && !save_errno
)
425 for (i
= 0; i
< hashp
->nmaps
; i
++)
427 free(hashp
->mapp
[i
]);
430 (void)close(hashp
->fp
);
441 * Write modified pages to disk
448 hash_sync(const DB
*dbp
, uint32_t flags
)
460 hashp
= dbp
->internal
;
461 if (!hashp
->save_file
)
463 if (__buf_free(hashp
, 0, 1) || flush_meta(hashp
))
472 * -1 indicates that errno should be set
475 flush_meta(HTAB
*hashp
)
478 #if BYTE_ORDER == LITTLE_ENDIAN
484 if (!hashp
->save_file
)
486 hashp
->MAGIC
= HASHMAGIC
;
487 hashp
->VERSION
= HASHVERSION
;
488 hashp
->H_CHARKEY
= hashp
->hash(CHARKEY
, sizeof(CHARKEY
));
492 #if BYTE_ORDER == LITTLE_ENDIAN
494 swap_header_copy(&hashp
->hdr
, whdrp
);
496 if ((wsize
= pwrite(fp
, whdrp
, sizeof(HASHHDR
), (off_t
)0)) == -1)
499 if (wsize
!= sizeof(HASHHDR
)) {
504 for (i
= 0; i
< NCACHED
; i
++)
506 if (__put_page(hashp
, (char *)(void *)hashp
->mapp
[i
],
507 (u_int
)hashp
->BITMAPS
[i
], 0, 1))
/*******************************SEARCH ROUTINES *****************************/
514 * All the access routines return
518 * 1 to indicate an external ERROR (i.e. key not found, etc)
519 * -1 to indicate an internal ERROR (i.e. out of memory, etc)
522 hash_get(const DB
*dbp
, const DBT
*key
, DBT
*data
, uint32_t flag
)
526 hashp
= dbp
->internal
;
528 hashp
->err
= errno
= EINVAL
;
531 return (hash_access(hashp
, HASH_GET
, __UNCONST(key
), data
));
535 hash_put(const DB
*dbp
, DBT
*key
, const DBT
*data
, uint32_t flag
)
539 hashp
= dbp
->internal
;
540 if (flag
&& flag
!= R_NOOVERWRITE
) {
541 hashp
->err
= errno
= EINVAL
;
544 if ((hashp
->flags
& O_ACCMODE
) == O_RDONLY
) {
545 hashp
->err
= errno
= EPERM
;
548 /* LINTED const castaway */
549 return (hash_access(hashp
, flag
== R_NOOVERWRITE
?
550 HASH_PUTNEW
: HASH_PUT
, __UNCONST(key
), __UNCONST(data
)));
554 hash_delete(const DB
*dbp
, const DBT
*key
, uint32_t flag
)
558 hashp
= dbp
->internal
;
559 if (flag
&& flag
!= R_CURSOR
) {
560 hashp
->err
= errno
= EINVAL
;
563 if ((hashp
->flags
& O_ACCMODE
) == O_RDONLY
) {
564 hashp
->err
= errno
= EPERM
;
567 return hash_access(hashp
, HASH_DELETE
, __UNCONST(key
), NULL
);
571 * Assume that hashp has been set in wrapper routine.
574 hash_access(HTAB
*hashp
, ACTION action
, DBT
*key
, DBT
*val
)
577 BUFHEAD
*bufp
, *save_bufp
;
584 #ifdef HASH_STATISTICS
590 kp
= (char *)key
->data
;
591 rbufp
= __get_buf(hashp
, __call_hash(hashp
, kp
, (int)size
), NULL
, 0);
596 /* Pin the bucket chain */
597 rbufp
->flags
|= BUF_PIN
;
598 for (bp
= (uint16_t *)(void *)rbufp
->page
, n
= *bp
++, ndx
= 1; ndx
< n
;)
599 if (bp
[1] >= REAL_KEY
) {
600 /* Real key/data pair */
601 if (size
== (size_t)(off
- *bp
) &&
602 memcmp(kp
, rbufp
->page
+ *bp
, size
) == 0)
605 #ifdef HASH_STATISTICS
610 } else if (bp
[1] == OVFLPAGE
) {
611 rbufp
= __get_buf(hashp
, (uint32_t)*bp
, rbufp
, 0);
613 save_bufp
->flags
&= ~BUF_PIN
;
617 bp
= (uint16_t *)(void *)rbufp
->page
;
621 } else if (bp
[1] < REAL_KEY
) {
623 __find_bigpair(hashp
, rbufp
, ndx
, kp
, (int)size
)) > 0)
628 __find_last_page(hashp
, &bufp
))) {
633 rbufp
= __get_buf(hashp
, (uint32_t)pageno
,
636 save_bufp
->flags
&= ~BUF_PIN
;
640 bp
= (uint16_t *)(void *)rbufp
->page
;
645 save_bufp
->flags
&= ~BUF_PIN
;
654 if (__addel(hashp
, rbufp
, key
, val
)) {
655 save_bufp
->flags
&= ~BUF_PIN
;
658 save_bufp
->flags
&= ~BUF_PIN
;
664 save_bufp
->flags
&= ~BUF_PIN
;
671 save_bufp
->flags
&= ~BUF_PIN
;
674 bp
= (uint16_t *)(void *)rbufp
->page
;
675 if (bp
[ndx
+ 1] < REAL_KEY
) {
676 if (__big_return(hashp
, rbufp
, ndx
, val
, 0))
679 val
->data
= (uint8_t *)rbufp
->page
+ (int)bp
[ndx
+ 1];
680 val
->size
= bp
[ndx
] - bp
[ndx
+ 1];
684 if ((__delpair(hashp
, rbufp
, ndx
)) ||
685 (__addel(hashp
, rbufp
, key
, val
))) {
686 save_bufp
->flags
&= ~BUF_PIN
;
691 if (__delpair(hashp
, rbufp
, ndx
))
694 * Our index lags 2 behind on the same page when we are
695 * deleting the element pointed to by the index; otherwise
696 * deleting randomly from an iterated hash produces undefined
699 if (ndx
!= hashp
->cndx
- 2 || rbufp
!= hashp
->cpage
)
702 if (hashp
->cndx
> 1) {
703 /* Move back one element */
707 * Move back one page, and indicate to go to the last
708 * element of the previous page by setting cndx to -1
718 save_bufp
->flags
&= ~BUF_PIN
;
723 hash_seq(const DB
*dbp
, DBT
*key
, DBT
*data
, uint32_t flag
)
726 BUFHEAD
*bufp
= NULL
; /* XXX: gcc */
730 hashp
= dbp
->internal
;
731 if (flag
&& flag
!= R_FIRST
&& flag
!= R_NEXT
) {
732 hashp
->err
= errno
= EINVAL
;
735 #ifdef HASH_STATISTICS
738 if ((hashp
->cbucket
< 0) || (flag
== R_FIRST
)) {
745 for (bp
= NULL
; !bp
|| !bp
[0]; ) {
746 if (!(bufp
= hashp
->cpage
)) {
747 for (bucket
= hashp
->cbucket
;
748 bucket
<= (uint32_t)hashp
->MAX_BUCKET
;
750 bufp
= __get_buf(hashp
, bucket
, NULL
, 0);
754 bp
= (uint16_t *)(void *)bufp
->page
;
758 hashp
->cbucket
= bucket
;
759 if (hashp
->cbucket
> hashp
->MAX_BUCKET
) {
763 if (hashp
->cndx
== -1) {
764 /* move to the last element of the page */
766 while (bp
[hashp
->cndx
- 1] != 0)
769 /* start on the first element */
773 bp
= (uint16_t *)(void *)hashp
->cpage
->page
;
774 if (flag
== R_NEXT
|| flag
== 0) {
775 if (hashp
->cndx
> bp
[0]) {
785 _DIAGASSERT(bp
!= NULL
);
786 _DIAGASSERT(bufp
!= NULL
);
787 while (bp
[hashp
->cndx
+ 1] == OVFLPAGE
) {
788 bufp
= hashp
->cpage
=
789 __get_buf(hashp
, (uint32_t)bp
[hashp
->cndx
], bufp
,
793 bp
= (uint16_t *)(void *)(bufp
->page
);
802 if (bp
[ndx
+ 1] < REAL_KEY
) {
803 if (__big_keydata(hashp
, bufp
, key
, data
, 1))
806 if (hashp
->cpage
== NULL
)
808 key
->data
= (uint8_t *)hashp
->cpage
->page
+ bp
[ndx
];
809 key
->size
= (ndx
> 1 ? bp
[ndx
- 1] : hashp
->BSIZE
) - bp
[ndx
];
810 data
->data
= (uint8_t *)hashp
->cpage
->page
+ bp
[ndx
+ 1];
811 data
->size
= bp
[ndx
] - bp
[ndx
+ 1];
/********************************* UTILITIES ************************/
825 __expand_table(HTAB
*hashp
)
827 uint32_t old_bucket
, new_bucket
;
828 int new_segnum
, spare_ndx
;
831 #ifdef HASH_STATISTICS
834 new_bucket
= ++hashp
->MAX_BUCKET
;
835 old_bucket
= (hashp
->MAX_BUCKET
& hashp
->LOW_MASK
);
837 new_segnum
= new_bucket
>> hashp
->SSHIFT
;
839 /* Check if we need a new segment */
840 if (new_segnum
>= hashp
->nsegs
) {
841 /* Check if we need to expand directory */
842 if (new_segnum
>= hashp
->DSIZE
) {
843 /* Reallocate directory */
844 dirsize
= hashp
->DSIZE
* sizeof(SEGMENT
*);
845 if (!hash_realloc(&hashp
->dir
, dirsize
, dirsize
<< 1))
848 _DBFIT(dirsize
, uint32_t);
849 hashp
->DSIZE
= (uint32_t)dirsize
;
851 if ((hashp
->dir
[new_segnum
] =
852 calloc((size_t)hashp
->SGSIZE
, sizeof(SEGMENT
))) == NULL
)
858 * If the split point is increasing (MAX_BUCKET's log base 2
859 * * increases), we need to copy the current contents of the spare
860 * split bucket to the next bucket.
862 spare_ndx
= __log2((uint32_t)(hashp
->MAX_BUCKET
+ 1));
863 if (spare_ndx
> hashp
->OVFL_POINT
) {
864 hashp
->SPARES
[spare_ndx
] = hashp
->SPARES
[hashp
->OVFL_POINT
];
865 hashp
->OVFL_POINT
= spare_ndx
;
868 if (new_bucket
> (uint32_t)hashp
->HIGH_MASK
) {
869 /* Starting a new doubling */
870 hashp
->LOW_MASK
= hashp
->HIGH_MASK
;
871 hashp
->HIGH_MASK
= new_bucket
| hashp
->LOW_MASK
;
873 /* Relocate records to the new bucket */
874 return (__split_page(hashp
, old_bucket
, new_bucket
));
878 * If realloc guarantees that the pointer is not destroyed if the realloc
879 * fails, then this routine can go away.
882 hash_realloc(SEGMENT
**p_ptr
, size_t oldsize
, size_t newsize
)
886 if ((p
= malloc(newsize
)) != NULL
) {
887 memmove(p
, *p_ptr
, oldsize
);
888 memset((char *)p
+ oldsize
, 0, newsize
- oldsize
);
896 __call_hash(HTAB
*hashp
, char *k
, int len
)
900 n
= hashp
->hash(k
, (size_t)len
);
901 bucket
= n
& hashp
->HIGH_MASK
;
902 if (bucket
> hashp
->MAX_BUCKET
)
903 bucket
= bucket
& hashp
->LOW_MASK
;
908 * Allocate segment table. On error, destroy the table and set errno.
910 * Returns 0 on success
913 alloc_segs(HTAB
*hashp
, int nsegs
)
920 hashp
->dir
= calloc((size_t)hashp
->DSIZE
, sizeof(SEGMENT
*));
921 if (hashp
->dir
== NULL
) {
923 (void)hdestroy(hashp
);
927 hashp
->nsegs
= nsegs
;
930 /* Allocate segments */
931 store
= calloc((size_t)(nsegs
<< hashp
->SSHIFT
), sizeof(SEGMENT
));
934 (void)hdestroy(hashp
);
938 for (i
= 0; i
< nsegs
; i
++)
939 hashp
->dir
[i
] = &store
[i
<< hashp
->SSHIFT
];
943 #if BYTE_ORDER == LITTLE_ENDIAN
945 * Hashp->hdr needs to be byteswapped.
948 swap_header_copy(HASHHDR
*srcp
, HASHHDR
*destp
)
952 P_32_COPY(srcp
->magic
, destp
->magic
);
953 P_32_COPY(srcp
->version
, destp
->version
);
954 P_32_COPY(srcp
->lorder
, destp
->lorder
);
955 P_32_COPY(srcp
->bsize
, destp
->bsize
);
956 P_32_COPY(srcp
->bshift
, destp
->bshift
);
957 P_32_COPY(srcp
->dsize
, destp
->dsize
);
958 P_32_COPY(srcp
->ssize
, destp
->ssize
);
959 P_32_COPY(srcp
->sshift
, destp
->sshift
);
960 P_32_COPY(srcp
->ovfl_point
, destp
->ovfl_point
);
961 P_32_COPY(srcp
->last_freed
, destp
->last_freed
);
962 P_32_COPY(srcp
->max_bucket
, destp
->max_bucket
);
963 P_32_COPY(srcp
->high_mask
, destp
->high_mask
);
964 P_32_COPY(srcp
->low_mask
, destp
->low_mask
);
965 P_32_COPY(srcp
->ffactor
, destp
->ffactor
);
966 P_32_COPY(srcp
->nkeys
, destp
->nkeys
);
967 P_32_COPY(srcp
->hdrpages
, destp
->hdrpages
);
968 P_32_COPY(srcp
->h_charkey
, destp
->h_charkey
);
969 for (i
= 0; i
< NCACHED
; i
++) {
970 P_32_COPY(srcp
->spares
[i
], destp
->spares
[i
]);
971 P_16_COPY(srcp
->bitmaps
[i
], destp
->bitmaps
[i
]);
976 swap_header(HTAB
*hashp
)
983 M_32_SWAP(hdrp
->magic
);
984 M_32_SWAP(hdrp
->version
);
985 M_32_SWAP(hdrp
->lorder
);
986 M_32_SWAP(hdrp
->bsize
);
987 M_32_SWAP(hdrp
->bshift
);
988 M_32_SWAP(hdrp
->dsize
);
989 M_32_SWAP(hdrp
->ssize
);
990 M_32_SWAP(hdrp
->sshift
);
991 M_32_SWAP(hdrp
->ovfl_point
);
992 M_32_SWAP(hdrp
->last_freed
);
993 M_32_SWAP(hdrp
->max_bucket
);
994 M_32_SWAP(hdrp
->high_mask
);
995 M_32_SWAP(hdrp
->low_mask
);
996 M_32_SWAP(hdrp
->ffactor
);
997 M_32_SWAP(hdrp
->nkeys
);
998 M_32_SWAP(hdrp
->hdrpages
);
999 M_32_SWAP(hdrp
->h_charkey
);
1000 for (i
= 0; i
< NCACHED
; i
++) {
1001 M_32_SWAP(hdrp
->spares
[i
]);
1002 M_16_SWAP(hdrp
->bitmaps
[i
]);