1 /* $NetBSD: hash.c,v 1.30 2008/09/11 12:58:00 joerg Exp $ */
4 * Copyright (c) 1990, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if HAVE_NBTOOL_CONFIG_H
36 #include "nbtool_config.h"
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: hash.c,v 1.30 2008/09/11 12:58:00 joerg Exp $");
42 #include "namespace.h"
43 #include <sys/param.h>
/*
 * Forward declarations for the file-local hash-table routines defined below.
 * NOTE(review): this file appears to be a mangled extraction -- declarations
 * are split mid-token across lines and some original lines are missing
 * (e.g. the #endif matching the BYTE_ORDER #if is not visible).  Code text
 * is preserved byte-for-byte; only comments have been added.
 */
59 static int alloc_segs(HTAB
*, int);
60 static int flush_meta(HTAB
*);
61 static int hash_access(HTAB
*, ACTION
, DBT
*, DBT
*);
62 static int hash_close(DB
*);
63 static int hash_delete(const DB
*, const DBT
*, uint32_t);
64 static int hash_fd(const DB
*);
65 static int hash_get(const DB
*, const DBT
*, DBT
*, uint32_t);
66 static int hash_put(const DB
*, DBT
*, const DBT
*, uint32_t);
67 static void *hash_realloc(SEGMENT
**, size_t, size_t);
68 static int hash_seq(const DB
*, DBT
*, DBT
*, uint32_t);
69 static int hash_sync(const DB
*, uint32_t);
70 static int hdestroy(HTAB
*);
71 static HTAB
*init_hash(HTAB
*, const char *, const HASHINFO
*);
72 static int init_htab(HTAB
*, size_t);
/* Byte-swap helpers are only needed on little-endian hosts. */
73 #if BYTE_ORDER == LITTLE_ENDIAN
74 static void swap_header(HTAB
*);
75 static void swap_header_copy(HASHHDR
*, HASHHDR
*);
78 /* Fast arithmetic, relying on powers of 2, */
79 #define MOD(x, y) ((x) & ((y) - 1))
/* Record the error and jump to the named cleanup label in the caller. */
81 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
/* Global counters, compiled in only when statistics gathering is enabled. */
88 #ifdef HASH_STATISTICS
89 int hash_accesses
, hash_collisions
, hash_expansions
, hash_overflows
;
92 /************************** INTERFACE ROUTINES ***************************/
/*
 * __hash_open --
 *	Open (or create) an on-disk hash table and return it wrapped in a DB
 *	handle; errors go through RETURN_ERROR, which sets save_errno and
 *	jumps to a cleanup label.
 * NOTE(review): many intervening source lines are missing from this
 * extraction (error branches, the closing of several ifs, the return
 * statements).  Code text below is byte-identical to what was extracted.
 */
97 __hash_open(const char *file
, int flags
, mode_t mode
, const HASHINFO
*info
,
103 int bpages
, new_table
, nsegs
, save_errno
;
/* Write-only access is rejected: the table must always be readable. */
106 if ((flags
& O_ACCMODE
) == O_WRONLY
) {
111 if (!(hashp
= calloc(1, sizeof(HTAB
))))
116 * Even if user wants write only, we need to be able to read
117 * the actual file, so we need to open it read/write. But, the
118 * field in the hashp structure needs to be accurate so that
119 * we can check accesses.
121 hashp
->flags
= flags
;
/* New table if no file, O_TRUNC requested, or the file does not exist. */
124 if (!file
|| (flags
& O_TRUNC
) ||
125 (stat(file
, &statbuf
) && (errno
== ENOENT
))) {
127 errno
= 0; /* Just in case someone looks at errno */
131 if ((hashp
->fp
= open(file
, flags
, mode
)) == -1)
132 RETURN_ERROR(errno
, error0
);
133 if (fcntl(hashp
->fp
, F_SETFD
, FD_CLOEXEC
) == -1)
134 RETURN_ERROR(errno
, error1
);
135 if (fstat(hashp
->fp
, &statbuf
) == -1)
136 RETURN_ERROR(errno
, error1
);
/* An existing but empty file is also treated as a fresh table. */
137 new_table
|= statbuf
.st_size
== 0;
140 if (!(hashp
= init_hash(hashp
, file
, info
)))
141 RETURN_ERROR(errno
, error1
);
143 /* Table already exists */
144 if (info
&& info
->hash
)
145 hashp
->hash
= info
->hash
;
147 hashp
->hash
= __default_hash
;
/* Read and validate the on-disk header (magic, version, hash check key). */
149 hdrsize
= read(hashp
->fp
, &hashp
->hdr
, sizeof(HASHHDR
));
150 #if BYTE_ORDER == LITTLE_ENDIAN
154 RETURN_ERROR(errno
, error1
);
155 if (hdrsize
!= sizeof(HASHHDR
))
156 RETURN_ERROR(EFTYPE
, error1
);
157 /* Verify file type, versions and hash function */
158 if (hashp
->MAGIC
!= HASHMAGIC
)
159 RETURN_ERROR(EFTYPE
, error1
);
160 #define OLDHASHVERSION 1
161 if (hashp
->VERSION
!= HASHVERSION
&&
162 hashp
->VERSION
!= OLDHASHVERSION
)
163 RETURN_ERROR(EFTYPE
, error1
);
/* The stored hash of CHARKEY must match, or a different hash fn was used. */
164 if (hashp
->hash(CHARKEY
, sizeof(CHARKEY
)) !=
165 (uint32_t)hashp
->H_CHARKEY
)
166 RETURN_ERROR(EFTYPE
, error1
);
168 * Figure out how many segments we need. Max_Bucket is the
169 * maximum bucket number, so the number of buckets is
172 nsegs
= (hashp
->MAX_BUCKET
+ 1 + hashp
->SGSIZE
- 1) /
175 if (alloc_segs(hashp
, nsegs
))
177 * If alloc_segs fails, table will have been destroyed
178 * and errno will have been set.
181 /* Read in bitmaps */
182 bpages
= (hashp
->SPARES
[hashp
->OVFL_POINT
] +
183 (unsigned int)(hashp
->BSIZE
<< BYTE_SHIFT
) - 1) >>
184 (hashp
->BSHIFT
+ BYTE_SHIFT
);
186 hashp
->nmaps
= bpages
;
187 (void)memset(&hashp
->mapp
[0], 0, bpages
* sizeof(uint32_t *));
190 /* Initialize Buffer Manager */
191 if (info
&& info
->cachesize
)
192 __buf_init(hashp
, info
->cachesize
);
194 __buf_init(hashp
, DEF_BUFSIZE
);
196 hashp
->new_file
= new_table
;
/* Only writable, file-backed tables are flushed back to disk on close. */
197 hashp
->save_file
= file
&& (hashp
->flags
& O_RDWR
);
199 if (!(dbp
= malloc(sizeof(DB
)))) {
/* Wire up the DB vtable to the static hash_* routines. */
205 dbp
->internal
= hashp
;
206 dbp
->close
= hash_close
;
207 dbp
->del
= hash_delete
;
212 dbp
->sync
= hash_sync
;
/* Debug dump of the header fields (compiled in under DEBUG). */
216 (void)fprintf(stderr
,
217 "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
219 "TABLE POINTER ", hashp
,
220 "BUCKET SIZE ", hashp
->BSIZE
,
221 "BUCKET SHIFT ", hashp
->BSHIFT
,
222 "DIRECTORY SIZE ", hashp
->DSIZE
,
223 "SEGMENT SIZE ", hashp
->SGSIZE
,
224 "SEGMENT SHIFT ", hashp
->SSHIFT
,
225 "FILL FACTOR ", hashp
->FFACTOR
,
226 "MAX BUCKET ", hashp
->MAX_BUCKET
,
227 "OVFL POINT ", hashp
->OVFL_POINT
,
228 "LAST FREED ", hashp
->LAST_FREED
,
229 "HIGH MASK ", hashp
->HIGH_MASK
,
230 "LOW MASK ", hashp
->LOW_MASK
,
231 "NSEGS ", hashp
->nsegs
,
232 "NKEYS ", hashp
->NKEYS
);
234 #ifdef HASH_STATISTICS
235 hash_overflows
= hash_accesses
= hash_collisions
= hash_expansions
= 0;
/* error-label cleanup: close the fd opened above. */
241 (void)close(hashp
->fp
);
/*
 * NOTE(review): fragment of hash_close() -- its signature and surrounding
 * lines are missing from this extraction.  It fetches the HTAB from the DB
 * handle and tears it down via hdestroy().
 */
258 hashp
= dbp
->internal
;
259 retval
= hdestroy(hashp
);
/*
 * hash_fd --
 *	Return the file descriptor backing the table (fragment: the error
 *	paths and return statement are missing from this extraction).
 *	An fp of -1 indicates an in-memory table with no backing file.
 */
265 hash_fd(const DB
*dbp
)
272 hashp
= dbp
->internal
;
273 if (hashp
->fp
== -1) {
280 /************************** LOCAL CREATION ROUTINES **********************/
/*
 * init_hash --
 *	Fill in a freshly calloc'd HTAB with default parameters, then
 *	override them from the optional HASHINFO and from the backing
 *	file's preferred block size.
 * NOTE(review): several lines (declarations, if/else bodies, the return)
 * are missing from this extraction; code text kept byte-identical.
 */
282 init_hash(HTAB
*hashp
, const char *file
, const HASHINFO
*info
)
/* Defaults; may be overridden below. */
289 hashp
->LORDER
= BYTE_ORDER
;
290 hashp
->BSIZE
= DEF_BUCKET_SIZE
;
291 hashp
->BSHIFT
= DEF_BUCKET_SHIFT
;
292 hashp
->SGSIZE
= DEF_SEGSIZE
;
293 hashp
->SSHIFT
= DEF_SEGSIZE_SHIFT
;
294 hashp
->DSIZE
= DEF_DIRSIZE
;
295 hashp
->FFACTOR
= DEF_FFACTOR
;
296 hashp
->hash
= __default_hash
;
297 memset(hashp
->SPARES
, 0, sizeof(hashp
->SPARES
));
298 memset(hashp
->BITMAPS
, 0, sizeof (hashp
->BITMAPS
));
300 /* Fix bucket size to be optimal for file system */
302 if (stat(file
, &statbuf
))
304 hashp
->BSIZE
= MIN(statbuf
.st_blksize
, MAX_BSIZE
);
305 hashp
->BSHIFT
= __log2((uint32_t)hashp
->BSIZE
);
/* Caller-supplied tuning parameters from the HASHINFO, when present. */
310 /* Round pagesize up to power of 2 */
311 hashp
->BSHIFT
= __log2(info
->bsize
);
312 hashp
->BSIZE
= 1 << hashp
->BSHIFT
;
313 if (hashp
->BSIZE
> MAX_BSIZE
) {
319 hashp
->FFACTOR
= info
->ffactor
;
321 hashp
->hash
= info
->hash
;
/* Only the two canonical byte orders are accepted. */
325 if (info
->lorder
!= BIG_ENDIAN
&&
326 info
->lorder
!= LITTLE_ENDIAN
) {
330 hashp
->LORDER
= info
->lorder
;
333 /* init_htab should destroy the table and set errno if it fails */
334 if (init_htab(hashp
, (size_t)nelem
))
340 * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
341 * the table and set errno, so we just pass the error information along.
343 * Returns 0 on No Error
/*
 * init_htab -- size the bucket/segment layout for an expected nelem keys.
 * NOTE(review): the declarations of l2/nbuckets/nsegs and a few statements
 * are missing from this extraction; code text kept byte-identical.
 */
346 init_htab(HTAB
*hashp
, size_t nelem
)
353 * Divide number of elements by the fill factor and determine a
354 * desired number of buckets. Allocate space for the next greater
355 * power of two number of buckets.
357 nelem
= (nelem
- 1) / hashp
->FFACTOR
+ 1;
/* _DBFIT: presumably asserts nelem fits in uint32_t -- TODO confirm. */
359 _DBFIT(nelem
, uint32_t);
360 l2
= __log2(MAX((uint32_t)nelem
, 2));
/* Pre-charge the spare (overflow) page counts for split points l2, l2+1. */
363 hashp
->SPARES
[l2
] = l2
+ 1;
364 hashp
->SPARES
[l2
+ 1] = l2
+ 1;
365 hashp
->OVFL_POINT
= l2
;
366 hashp
->LAST_FREED
= 2;
368 /* First bitmap page is at: splitpoint l2 page offset 1 */
369 if (__ibitmap(hashp
, (int)OADDR_OF(l2
, 1), l2
+ 1, 0))
372 hashp
->MAX_BUCKET
= hashp
->LOW_MASK
= nbuckets
- 1;
373 hashp
->HIGH_MASK
= (nbuckets
<< 1) - 1;
374 /* LINTED constant in conditional context */
375 hashp
->HDRPAGES
= ((MAX(sizeof(HASHHDR
), MINHDRSIZE
) - 1) >>
/* Segment count rounded up to a power of two. */
378 nsegs
= (nbuckets
- 1) / hashp
->SGSIZE
+ 1;
379 nsegs
= 1 << __log2(nsegs
);
381 if (nsegs
> (uint32_t)hashp
->DSIZE
)
382 hashp
->DSIZE
= nsegs
;
383 return (alloc_segs(hashp
, (int)nsegs
));
386 /********************** DESTROY/CLOSE ROUTINES ************************/
389 * Flushes any changes to the file if necessary and destroys the hashp
390 * structure, freeing all allocated space.
/*
 * hdestroy -- flush (if save_file), free all segments/bitmaps, close the fd.
 * NOTE(review): declarations and the return path are missing from this
 * extraction; code text kept byte-identical.
 */
393 hdestroy(HTAB
*hashp
)
399 #ifdef HASH_STATISTICS
400 (void)fprintf(stderr
, "hdestroy: accesses %d collisions %d\n",
401 hash_accesses
, hash_collisions
);
402 (void)fprintf(stderr
, "hdestroy: expansions %d\n",
404 (void)fprintf(stderr
, "hdestroy: overflows %d\n",
406 (void)fprintf(stderr
, "keys %d maxp %d segmentcount %d\n",
407 hashp
->NKEYS
, hashp
->MAX_BUCKET
, hashp
->nsegs
);
409 for (i
= 0; i
< NCACHED
; i
++)
410 (void)fprintf(stderr
,
411 "spares[%d] = %d\n", i
, hashp
->SPARES
[i
]);
414 * Call on buffer manager to free buffers, and if required,
415 * write them to disk.
417 if (__buf_free(hashp
, 1, hashp
->save_file
))
420 free(*hashp
->dir
); /* Free initial segments */
421 /* Free extra segments */
422 while (hashp
->exsegs
--)
423 free(hashp
->dir
[--hashp
->nsegs
]);
426 if (flush_meta(hashp
) && !save_errno
)
/* Free the in-core overflow-bitmap pages. */
429 for (i
= 0; i
< hashp
->nmaps
; i
++)
431 free(hashp
->mapp
[i
]);
434 (void)close(hashp
->fp
);
445 * Write modified pages to disk
/*
 * hash_sync -- DB->sync entry point.  No-op for tables without a backing
 * file; otherwise flushes dirty buffers and the metadata header.
 * NOTE(review): flag validation and return statements are missing from
 * this extraction.
 */
452 hash_sync(const DB
*dbp
, uint32_t flags
)
464 hashp
= dbp
->internal
;
465 if (!hashp
->save_file
)
467 if (__buf_free(hashp
, 0, 1) || flush_meta(hashp
))
476 * -1 indicates that errno should be set
/*
 * flush_meta -- write the (possibly byte-swapped) header and the overflow
 * bitmap pages back to the file.  Returns 0 on success, -1 with errno set.
 * NOTE(review): declarations (fp, whdrp, wsize, i) and some branches are
 * missing from this extraction; code text kept byte-identical.
 */
479 flush_meta(HTAB
*hashp
)
482 #if BYTE_ORDER == LITTLE_ENDIAN
488 if (!hashp
->save_file
)
/* Stamp current magic/version/check-key before writing the header out. */
490 hashp
->MAGIC
= HASHMAGIC
;
491 hashp
->VERSION
= HASHVERSION
;
492 hashp
->H_CHARKEY
= hashp
->hash(CHARKEY
, sizeof(CHARKEY
));
496 #if BYTE_ORDER == LITTLE_ENDIAN
/* On little-endian hosts write a byte-swapped copy, not the live header. */
498 swap_header_copy(&hashp
->hdr
, whdrp
);
500 if ((wsize
= pwrite(fp
, whdrp
, sizeof(HASHHDR
), (off_t
)0)) == -1)
503 if (wsize
!= sizeof(HASHHDR
)) {
508 for (i
= 0; i
< NCACHED
; i
++)
510 if (__put_page(hashp
, (char *)(void *)hashp
->mapp
[i
],
511 (u_int
)hashp
->BITMAPS
[i
], 0, 1))
516 /*******************************SEARCH ROUTINES *****************************/
518 * All the access routines return
522 * 1 to indicate an external ERROR (i.e. key not found, etc)
523 * -1 to indicate an internal ERROR (i.e. out of memory, etc)
/*
 * hash_get -- DB->get entry point; rejects unknown flags (EINVAL) and
 * delegates the lookup to hash_access(HASH_GET).
 * NOTE(review): the flag-check condition and return lines are missing
 * from this extraction.
 */
526 hash_get(const DB
*dbp
, const DBT
*key
, DBT
*data
, uint32_t flag
)
530 hashp
= dbp
->internal
;
532 hashp
->err
= errno
= EINVAL
;
535 return (hash_access(hashp
, HASH_GET
, __UNCONST(key
), data
));
/*
 * hash_put -- DB->put entry point.  Only R_NOOVERWRITE is a legal flag;
 * writes to a read-only table fail with EPERM.  Delegates to
 * hash_access(HASH_PUTNEW or HASH_PUT).
 */
539 hash_put(const DB
*dbp
, DBT
*key
, const DBT
*data
, uint32_t flag
)
543 hashp
= dbp
->internal
;
544 if (flag
&& flag
!= R_NOOVERWRITE
) {
545 hashp
->err
= errno
= EINVAL
;
548 if ((hashp
->flags
& O_ACCMODE
) == O_RDONLY
) {
549 hashp
->err
= errno
= EPERM
;
552 /* LINTED const castaway */
553 return (hash_access(hashp
, flag
== R_NOOVERWRITE
?
554 HASH_PUTNEW
: HASH_PUT
, __UNCONST(key
), __UNCONST(data
)));
/*
 * hash_delete -- DB->del entry point.  Only R_CURSOR is a legal flag;
 * deletes from a read-only table fail with EPERM.  Delegates to
 * hash_access(HASH_DELETE) with a NULL value.
 */
558 hash_delete(const DB
*dbp
, const DBT
*key
, uint32_t flag
)
562 hashp
= dbp
->internal
;
563 if (flag
&& flag
!= R_CURSOR
) {
564 hashp
->err
= errno
= EINVAL
;
567 if ((hashp
->flags
& O_ACCMODE
) == O_RDONLY
) {
568 hashp
->err
= errno
= EPERM
;
571 return hash_access(hashp
, HASH_DELETE
, __UNCONST(key
), NULL
);
575 * Assume that hashp has been set in wrapper routine.
/*
 * hash_access -- core get/put/delete engine.  Hashes the key to a bucket,
 * walks the bucket's page chain (pinning it via BUF_PIN so the buffer
 * manager cannot evict it), and performs the requested ACTION.
 * NOTE(review): this extraction is missing many lines (declarations,
 * switch/case labels on the ACTION, several returns); code text is kept
 * byte-identical and only comments are added.
 */
578 hash_access(HTAB
*hashp
, ACTION action
, DBT
*key
, DBT
*val
)
581 BUFHEAD
*bufp
, *save_bufp
;
588 #ifdef HASH_STATISTICS
594 kp
= (char *)key
->data
;
595 rbufp
= __get_buf(hashp
, __call_hash(hashp
, kp
, (int)size
), NULL
, 0);
600 /* Pin the bucket chain */
601 rbufp
->flags
|= BUF_PIN
;
/* Page layout: bp[0] = entry count, then key/data offset pairs. */
602 for (bp
= (uint16_t *)(void *)rbufp
->page
, n
= *bp
++, ndx
= 1; ndx
< n
;)
603 if (bp
[1] >= REAL_KEY
) {
604 /* Real key/data pair */
605 if (size
== (size_t)(off
- *bp
) &&
606 memcmp(kp
, rbufp
->page
+ *bp
, size
) == 0)
609 #ifdef HASH_STATISTICS
/* Chain continues on an overflow page: follow it. */
614 } else if (bp
[1] == OVFLPAGE
) {
615 rbufp
= __get_buf(hashp
, (uint32_t)*bp
, rbufp
, 0);
617 save_bufp
->flags
&= ~BUF_PIN
;
621 bp
= (uint16_t *)(void *)rbufp
->page
;
/* Offsets below REAL_KEY mark big (multi-page) key/data pairs. */
625 } else if (bp
[1] < REAL_KEY
) {
627 __find_bigpair(hashp
, rbufp
, ndx
, kp
, (int)size
)) > 0)
632 __find_last_page(hashp
, &bufp
))) {
637 rbufp
= __get_buf(hashp
, (uint32_t)pageno
,
640 save_bufp
->flags
&= ~BUF_PIN
;
644 bp
= (uint16_t *)(void *)rbufp
->page
;
/* Not found: unpin and handle per-action (PUTNEW inserts, GET fails...). */
649 save_bufp
->flags
&= ~BUF_PIN
;
658 if (__addel(hashp
, rbufp
, key
, val
)) {
659 save_bufp
->flags
&= ~BUF_PIN
;
662 save_bufp
->flags
&= ~BUF_PIN
;
668 save_bufp
->flags
&= ~BUF_PIN
;
675 save_bufp
->flags
&= ~BUF_PIN
;
/* Found: per-action handling on the matching entry at index ndx. */
678 bp
= (uint16_t *)(void *)rbufp
->page
;
679 if (bp
[ndx
+ 1] < REAL_KEY
) {
680 if (__big_return(hashp
, rbufp
, ndx
, val
, 0))
683 val
->data
= (uint8_t *)rbufp
->page
+ (int)bp
[ndx
+ 1];
684 val
->size
= bp
[ndx
] - bp
[ndx
+ 1];
/* Replace = delete old pair then add the new one. */
688 if ((__delpair(hashp
, rbufp
, ndx
)) ||
689 (__addel(hashp
, rbufp
, key
, val
))) {
690 save_bufp
->flags
&= ~BUF_PIN
;
695 if (__delpair(hashp
, rbufp
, ndx
))
701 save_bufp
->flags
&= ~BUF_PIN
;
/*
 * hash_seq -- DB->seq entry point: sequential iteration over all
 * key/data pairs.  Cursor state lives in hashp (cbucket/cndx/cpage).
 * NOTE(review): many lines are missing from this extraction (cursor
 * reset on R_FIRST, loop-exit conditions, several returns); code text
 * is kept byte-identical.
 */
706 hash_seq(const DB
*dbp
, DBT
*key
, DBT
*data
, uint32_t flag
)
709 BUFHEAD
*bufp
= NULL
; /* XXX: gcc */
713 hashp
= dbp
->internal
;
714 if (flag
&& flag
!= R_FIRST
&& flag
!= R_NEXT
) {
715 hashp
->err
= errno
= EINVAL
;
718 #ifdef HASH_STATISTICS
/* Negative cbucket or R_FIRST means (re)start the scan from bucket 0. */
721 if ((hashp
->cbucket
< 0) || (flag
== R_FIRST
)) {
/* Skip empty buckets until a page with at least one entry is found. */
727 for (bp
= NULL
; !bp
|| !bp
[0]; ) {
728 if (!(bufp
= hashp
->cpage
)) {
729 for (bucket
= hashp
->cbucket
;
730 bucket
<= (uint32_t)hashp
->MAX_BUCKET
;
731 bucket
++, hashp
->cndx
= 1) {
732 bufp
= __get_buf(hashp
, bucket
, NULL
, 0);
736 bp
= (uint16_t *)(void *)bufp
->page
;
740 hashp
->cbucket
= bucket
;
741 if (hashp
->cbucket
> hashp
->MAX_BUCKET
) {
746 bp
= (uint16_t *)(void *)hashp
->cpage
->page
;
748 _DIAGASSERT(bp
!= NULL
);
749 _DIAGASSERT(bufp
!= NULL
);
/* Follow overflow pages until the cursor sits on a real entry. */
750 while (bp
[hashp
->cndx
+ 1] == OVFLPAGE
) {
751 bufp
= hashp
->cpage
=
752 __get_buf(hashp
, (uint32_t)bp
[hashp
->cndx
], bufp
,
756 bp
= (uint16_t *)(void *)(bufp
->page
);
/* Big pairs are materialized via __big_keydata; otherwise point the
   DBTs straight into the pinned page. */
765 if (bp
[ndx
+ 1] < REAL_KEY
) {
766 if (__big_keydata(hashp
, bufp
, key
, data
, 1))
769 if (hashp
->cpage
== NULL
)
771 key
->data
= (uint8_t *)hashp
->cpage
->page
+ bp
[ndx
];
772 key
->size
= (ndx
> 1 ? bp
[ndx
- 1] : hashp
->BSIZE
) - bp
[ndx
];
773 data
->data
= (uint8_t *)hashp
->cpage
->page
+ bp
[ndx
+ 1];
774 data
->size
= bp
[ndx
] - bp
[ndx
+ 1];
786 /********************************* UTILITIES ************************/
/*
 * __expand_table -- linear-hashing expansion: add one bucket (growing the
 * segment directory if needed), advance the split point, and split the
 * records of old_bucket between it and new_bucket.
 * NOTE(review): the declaration of dirsize and a few lines are missing
 * from this extraction; code text kept byte-identical.
 */
794 __expand_table(HTAB
*hashp
)
796 uint32_t old_bucket
, new_bucket
;
797 int new_segnum
, spare_ndx
;
800 #ifdef HASH_STATISTICS
803 new_bucket
= ++hashp
->MAX_BUCKET
;
804 old_bucket
= (hashp
->MAX_BUCKET
& hashp
->LOW_MASK
);
806 new_segnum
= new_bucket
>> hashp
->SSHIFT
;
808 /* Check if we need a new segment */
809 if (new_segnum
>= hashp
->nsegs
) {
810 /* Check if we need to expand directory */
811 if (new_segnum
>= hashp
->DSIZE
) {
812 /* Reallocate directory */
813 dirsize
= hashp
->DSIZE
* sizeof(SEGMENT
*);
814 if (!hash_realloc(&hashp
->dir
, dirsize
, dirsize
<< 1))
816 hashp
->DSIZE
= dirsize
<< 1;
818 if ((hashp
->dir
[new_segnum
] =
819 calloc((size_t)hashp
->SGSIZE
, sizeof(SEGMENT
))) == NULL
)
825 * If the split point is increasing (MAX_BUCKET's log base 2
826 * * increases), we need to copy the current contents of the spare
827 * split bucket to the next bucket.
829 spare_ndx
= __log2((uint32_t)(hashp
->MAX_BUCKET
+ 1));
830 if (spare_ndx
> hashp
->OVFL_POINT
) {
831 hashp
->SPARES
[spare_ndx
] = hashp
->SPARES
[hashp
->OVFL_POINT
];
832 hashp
->OVFL_POINT
= spare_ndx
;
835 if (new_bucket
> (uint32_t)hashp
->HIGH_MASK
) {
836 /* Starting a new doubling */
837 hashp
->LOW_MASK
= hashp
->HIGH_MASK
;
838 hashp
->HIGH_MASK
= new_bucket
| hashp
->LOW_MASK
;
840 /* Relocate records to the new bucket */
841 return (__split_page(hashp
, old_bucket
, new_bucket
));
845 * If realloc guarantees that the pointer is not destroyed if the realloc
846 * fails, then this routine can go away.
/*
 * hash_realloc -- grow *p_ptr from oldsize to newsize bytes, zero-filling
 * the new tail.  Malloc-then-copy so the caller keeps the old block on
 * failure.  (Fragment: the declaration of p, the *p_ptr update and the
 * return are missing from this extraction.)
 */
849 hash_realloc(SEGMENT
**p_ptr
, size_t oldsize
, size_t newsize
)
853 if ((p
= malloc(newsize
)) != NULL
) {
854 memmove(p
, *p_ptr
, oldsize
);
855 memset((char *)p
+ oldsize
, 0, newsize
- oldsize
);
/*
 * __call_hash -- hash key k of length len to a bucket number.  Standard
 * linear-hashing trick: mask with HIGH_MASK, and fall back to LOW_MASK
 * when the result is beyond the current MAX_BUCKET (not yet split).
 * (Fragment: declarations and the return statement are missing from
 * this extraction.)
 */
863 __call_hash(HTAB
*hashp
, char *k
, int len
)
867 n
= hashp
->hash(k
, (size_t)len
);
868 bucket
= n
& hashp
->HIGH_MASK
;
869 if (bucket
> hashp
->MAX_BUCKET
)
870 bucket
= bucket
& hashp
->LOW_MASK
;
875 * Allocate segment table. On error, destroy the table and set errno.
877 * Returns 0 on success
/*
 * alloc_segs -- allocate the segment directory plus nsegs segments in one
 * contiguous store, pointing each dir slot at its slice.  On allocation
 * failure the whole table is torn down via hdestroy().
 * (Fragment: declarations, errno save/restore and returns are missing
 * from this extraction.)
 */
880 alloc_segs(HTAB
*hashp
, int nsegs
)
887 hashp
->dir
= calloc((size_t)hashp
->DSIZE
, sizeof(SEGMENT
*));
888 if (hashp
->dir
== NULL
) {
890 (void)hdestroy(hashp
);
894 hashp
->nsegs
= nsegs
;
897 /* Allocate segments */
898 store
= calloc((size_t)(nsegs
<< hashp
->SSHIFT
), sizeof(SEGMENT
));
901 (void)hdestroy(hashp
);
905 for (i
= 0; i
< nsegs
; i
++)
906 hashp
->dir
[i
] = &store
[i
<< hashp
->SSHIFT
];
910 #if BYTE_ORDER == LITTLE_ENDIAN
912 * Hashp->hdr needs to be byteswapped.
915 swap_header_copy(HASHHDR
*srcp
, HASHHDR
*destp
)
919 P_32_COPY(srcp
->magic
, destp
->magic
);
920 P_32_COPY(srcp
->version
, destp
->version
);
921 P_32_COPY(srcp
->lorder
, destp
->lorder
);
922 P_32_COPY(srcp
->bsize
, destp
->bsize
);
923 P_32_COPY(srcp
->bshift
, destp
->bshift
);
924 P_32_COPY(srcp
->dsize
, destp
->dsize
);
925 P_32_COPY(srcp
->ssize
, destp
->ssize
);
926 P_32_COPY(srcp
->sshift
, destp
->sshift
);
927 P_32_COPY(srcp
->ovfl_point
, destp
->ovfl_point
);
928 P_32_COPY(srcp
->last_freed
, destp
->last_freed
);
929 P_32_COPY(srcp
->max_bucket
, destp
->max_bucket
);
930 P_32_COPY(srcp
->high_mask
, destp
->high_mask
);
931 P_32_COPY(srcp
->low_mask
, destp
->low_mask
);
932 P_32_COPY(srcp
->ffactor
, destp
->ffactor
);
933 P_32_COPY(srcp
->nkeys
, destp
->nkeys
);
934 P_32_COPY(srcp
->hdrpages
, destp
->hdrpages
);
935 P_32_COPY(srcp
->h_charkey
, destp
->h_charkey
);
936 for (i
= 0; i
< NCACHED
; i
++) {
937 P_32_COPY(srcp
->spares
[i
], destp
->spares
[i
]);
938 P_16_COPY(srcp
->bitmaps
[i
], destp
->bitmaps
[i
]);
/*
 * swap_header -- byte-swap the in-core header in place (hdrp presumably
 * points at &hashp->hdr -- the assignment is not visible here).  Used on
 * little-endian hosts after reading the big-endian on-disk header.
 * NOTE(review): this extraction is truncated -- the declarations at the
 * top and the closing of the loop/function lie beyond the visible text.
 */
943 swap_header(HTAB
*hashp
)
950 M_32_SWAP(hdrp
->magic
);
951 M_32_SWAP(hdrp
->version
);
952 M_32_SWAP(hdrp
->lorder
);
953 M_32_SWAP(hdrp
->bsize
);
954 M_32_SWAP(hdrp
->bshift
);
955 M_32_SWAP(hdrp
->dsize
);
956 M_32_SWAP(hdrp
->ssize
);
957 M_32_SWAP(hdrp
->sshift
);
958 M_32_SWAP(hdrp
->ovfl_point
);
959 M_32_SWAP(hdrp
->last_freed
);
960 M_32_SWAP(hdrp
->max_bucket
);
961 M_32_SWAP(hdrp
->high_mask
);
962 M_32_SWAP(hdrp
->low_mask
);
963 M_32_SWAP(hdrp
->ffactor
);
964 M_32_SWAP(hdrp
->nkeys
);
965 M_32_SWAP(hdrp
->hdrpages
);
966 M_32_SWAP(hdrp
->h_charkey
);
967 for (i
= 0; i
< NCACHED
; i
++) {
968 M_32_SWAP(hdrp
->spares
[i
]);
969 M_16_SWAP(hdrp
->bitmaps
[i
]);