/*-
 * Copyright 1997-1999, 2001, John-Mark Gurney.
 *           2008-2009, Attractive Chaos <attractor@live.co.uk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __AC_KBTREE_H
#define __AC_KBTREE_H

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define KB_MAX_DEPTH 64

typedef struct {
	int32_t is_internal:1, n:31;
} kbnode_t;

typedef struct {
	kbnode_t *x;
	int i;
} kbpos_t;

typedef struct {
	kbpos_t stack[KB_MAX_DEPTH], *p;
} kbitr_t;

#define __KB_KEY(type, x) ((type*)((char*)x + 4))
#define __KB_PTR(btr, x) ((kbnode_t**)((char*)x + btr->off_ptr))

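/* Note (added commentary, not part of the original header): each kbnode_t is
 * allocated as one flat block. The 4-byte header (the is_internal/n bitfield)
 * comes first, the key array starts at byte offset 4 (__KB_KEY), and for
 * internal nodes the child-pointer array starts at off_ptr, i.e. right after
 * the keys (__KB_PTR). The ilen/elen fields below are the rounded-up block
 * sizes for internal and external (leaf) nodes respectively. */
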
#define __KB_TREE_T(name) \
	typedef struct { \
		kbnode_t *root; \
		int off_key, off_ptr, ilen, elen; \
		int n, t; \
		int n_keys, n_nodes; \
	} kbtree_##name##_t;

#define __KB_INIT(name, key_t) \
	kbtree_##name##_t *kb_init_##name(int size) \
	{ \
		kbtree_##name##_t *b; \
		b = (kbtree_##name##_t*)calloc(1, sizeof(kbtree_##name##_t)); \
		b->t = ((size - 4 - sizeof(void*)) / (sizeof(void*) + sizeof(key_t)) + 1) >> 1; \
		if (b->t < 2) { \
			free(b); return 0; \
		} \
		b->n = 2 * b->t - 1; \
		b->off_ptr = 4 + b->n * sizeof(key_t); \
		b->ilen = (4 + sizeof(void*) + b->n * (sizeof(void*) + sizeof(key_t)) + 3) >> 2 << 2; \
		b->elen = (b->off_ptr + 3) >> 2 << 2; \
		b->root = (kbnode_t*)calloc(1, b->ilen); \
		++b->n_nodes; \
		return b; \
	}

#define __kb_destroy(b) do { \
		int i, max = 8; \
		kbnode_t *x, **top, **stack = 0; \
		if (b) { \
			top = stack = (kbnode_t**)calloc(max, sizeof(kbnode_t*)); \
			*top++ = (b)->root; \
			while (top != stack) { \
				x = *--top; \
				if (x->is_internal == 0) { free(x); continue; } \
				for (i = 0; i <= x->n; ++i) \
					if (__KB_PTR(b, x)[i]) { \
						if (top - stack == max) { \
							max <<= 1; \
							stack = (kbnode_t**)realloc(stack, max * sizeof(kbnode_t*)); \
							top = stack + (max>>1); \
						} \
						*top++ = __KB_PTR(b, x)[i]; \
					} \
				free(x); \
			} \
		} \
		free(b); free(stack); \
	} while (0)

#define __KB_GET_AUX1(name, key_t, __cmp) \
	static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, const key_t * __restrict k, int *r) \
	{ \
		int tr, *rr, begin = 0, end = x->n; \
		if (x->n == 0) return -1; \
		rr = r? r : &tr; \
		while (begin < end) { \
			int mid = (begin + end) >> 1; \
			if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
			else end = mid; \
		} \
		if (begin == x->n) { *rr = 1; return x->n - 1; } \
		if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
		return begin; \
	}

#define __KB_GET(name, key_t) \
	static key_t *kb_getp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		int i, r = 0; \
		kbnode_t *x = b->root; \
		while (x) { \
			i = __kb_getp_aux_##name(x, k, &r); \
			if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
			if (x->is_internal == 0) return 0; \
			x = __KB_PTR(b, x)[i + 1]; \
		} \
		return 0; \
	} \
	static inline key_t *kb_get_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		return kb_getp_##name(b, &k); \
	}

#define __KB_INTERVAL(name, key_t) \
	static void kb_intervalp_##name(kbtree_##name##_t *b, const key_t * __restrict k, key_t **lower, key_t **upper) \
	{ \
		int i, r = 0; \
		kbnode_t *x = b->root; \
		*lower = *upper = 0; \
		while (x) { \
			i = __kb_getp_aux_##name(x, k, &r); \
			if (i >= 0 && r == 0) { \
				*lower = *upper = &__KB_KEY(key_t, x)[i]; \
				return; \
			} \
			if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
			if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
			if (x->is_internal == 0) return; \
			x = __KB_PTR(b, x)[i + 1]; \
		} \
	} \
	static inline void kb_interval_##name(kbtree_##name##_t *b, const key_t k, key_t **lower, key_t **upper) \
	{ \
		kb_intervalp_##name(b, &k, lower, upper); \
	}

#define __KB_PUT(name, key_t, __cmp) \
	/* x must be an internal node */ \
	static void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
	{ \
		kbnode_t *z; \
		z = (kbnode_t*)calloc(1, y->is_internal? b->ilen : b->elen); \
		++b->n_nodes; \
		z->is_internal = y->is_internal; \
		z->n = b->t - 1; \
		memcpy(__KB_KEY(key_t, z), __KB_KEY(key_t, y) + b->t, sizeof(key_t) * (b->t - 1)); \
		if (y->is_internal) memcpy(__KB_PTR(b, z), __KB_PTR(b, y) + b->t, sizeof(void*) * b->t); \
		y->n = b->t - 1; \
		memmove(__KB_PTR(b, x) + i + 2, __KB_PTR(b, x) + i + 1, sizeof(void*) * (x->n - i)); \
		__KB_PTR(b, x)[i + 1] = z; \
		memmove(__KB_KEY(key_t, x) + i + 1, __KB_KEY(key_t, x) + i, sizeof(key_t) * (x->n - i)); \
		__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[b->t - 1]; \
		++x->n; \
	} \
	static key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k) \
	{ \
		int i = x->n - 1; \
		key_t *ret; \
		if (x->is_internal == 0) { \
			i = __kb_getp_aux_##name(x, k, 0); \
			if (i != x->n - 1) \
				memmove(__KB_KEY(key_t, x) + i + 2, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
			ret = &__KB_KEY(key_t, x)[i + 1]; \
			*ret = *k; \
			++x->n; \
		} else { \
			i = __kb_getp_aux_##name(x, k, 0) + 1; \
			if (__KB_PTR(b, x)[i]->n == 2 * b->t - 1) { \
				__kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
				if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
			} \
			ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
		} \
		return ret; \
	} \
	static key_t *kb_putp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		kbnode_t *r, *s; \
		++b->n_keys; \
		r = b->root; \
		if (r->n == 2 * b->t - 1) { \
			++b->n_nodes; \
			s = (kbnode_t*)calloc(1, b->ilen); \
			b->root = s; s->is_internal = 1; s->n = 0; \
			__KB_PTR(b, s)[0] = r; \
			__kb_split_##name(b, s, 0, r); \
			r = s; \
		} \
		return __kb_putp_aux_##name(b, r, k); \
	} \
	static inline void kb_put_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		kb_putp_##name(b, &k); \
	}

#define __KB_DEL(name, key_t) \
	static key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k, int s) \
	{ \
		int yn, zn, i, r = 0; \
		kbnode_t *xp, *y, *z; \
		key_t kp; \
		if (x == 0) return *k; \
		if (s) { /* s can only be 0, 1 or 2 */ \
			r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
			i = s == 1? x->n - 1 : -1; \
		} else i = __kb_getp_aux_##name(x, k, &r); \
		if (x->is_internal == 0) { \
			if (s == 2) ++i; \
			kp = __KB_KEY(key_t, x)[i]; \
			memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
			--x->n; \
			return kp; \
		} \
		if (r == 0) { \
			if ((yn = __KB_PTR(b, x)[i]->n) >= b->t) { \
				xp = __KB_PTR(b, x)[i]; \
				kp = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
				return kp; \
			} else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= b->t) { \
				xp = __KB_PTR(b, x)[i + 1]; \
				kp = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
				return kp; \
			} else if (yn == b->t - 1 && zn == b->t - 1) { \
				y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
				__KB_KEY(key_t, y)[y->n++] = *k; \
				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, z), z->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, z), (z->n + 1) * sizeof(void*)); \
				y->n += z->n; \
				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
				--x->n; \
				free(z); \
				return __kb_delp_aux_##name(b, y, k, s); \
			} \
		} \
		++i; \
		if ((xp = __KB_PTR(b, x)[i])->n == b->t - 1) { \
			if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= b->t) { \
				memmove(__KB_KEY(key_t, xp) + 1, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
				if (xp->is_internal) memmove(__KB_PTR(b, xp) + 1, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
				__KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
				__KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
				if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
				--y->n; ++xp->n; \
			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= b->t) { \
				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
				if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
				--y->n; \
				memmove(__KB_KEY(key_t, y), __KB_KEY(key_t, y) + 1, y->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y), __KB_PTR(b, y) + 1, (y->n + 1) * sizeof(void*)); \
			} else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == b->t - 1) { \
				__KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
				y->n += xp->n; \
				memmove(__KB_KEY(key_t, x) + i - 1, __KB_KEY(key_t, x) + i, (x->n - i) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i, __KB_PTR(b, x) + i + 1, (x->n - i) * sizeof(void*)); \
				--x->n; \
				free(xp); \
				xp = y; \
			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == b->t - 1) { \
				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
				memmove(__KB_KEY(key_t, xp) + xp->n, __KB_KEY(key_t, y), y->n * sizeof(key_t)); \
				if (xp->is_internal) memmove(__KB_PTR(b, xp) + xp->n, __KB_PTR(b, y), (y->n + 1) * sizeof(void*)); \
				xp->n += y->n; \
				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
				--x->n; \
				free(y); \
			} \
		} \
		return __kb_delp_aux_##name(b, xp, k, s); \
	} \
	static key_t kb_delp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		kbnode_t *x; \
		key_t ret; \
		ret = __kb_delp_aux_##name(b, b->root, k, 0); \
		--b->n_keys; \
		if (b->root->n == 0 && b->root->is_internal) { \
			--b->n_nodes; \
			x = b->root; \
			b->root = __KB_PTR(b, x)[0]; \
			free(x); \
		} \
		return ret; \
	} \
	static inline key_t kb_del_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		return kb_delp_##name(b, &k); \
	}

#define __KB_ITR(name, key_t) \
	static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_t *itr) \
	{ \
		itr->p = 0; \
		if (b->n_keys == 0) return; \
		itr->p = itr->stack; \
		itr->p->x = b->root; itr->p->i = 0; \
		while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
			kbnode_t *x = itr->p->x; \
			++itr->p; \
			itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
		} \
	} \
	static int kb_itr_get_##name(kbtree_##name##_t *b, const key_t * __restrict k, kbitr_t *itr) \
	{ \
		int i, r = 0; \
		itr->p = itr->stack; \
		itr->p->x = b->root; itr->p->i = 0; \
		while (itr->p->x) { \
			i = __kb_getp_aux_##name(itr->p->x, k, &r); \
			if (i >= 0 && r == 0) return 0; \
			if (itr->p->x->is_internal == 0) return -1; \
			itr->p[1].x = __KB_PTR(b, itr->p->x)[i + 1]; \
			itr->p[1].i = i; \
			++itr->p; \
		} \
		return -1; \
	} \
	static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_t *itr) \
	{ \
		if (itr->p < itr->stack) return 0; \
		for (;;) { \
			++itr->p->i; \
			while (itr->p->x && itr->p->i <= itr->p->x->n) { \
				itr->p[1].i = 0; \
				itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
				++itr->p; \
			} \
			--itr->p; \
			if (itr->p < itr->stack) return 0; \
			if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
		} \
	}

#define KBTREE_INIT(name, key_t, __cmp) \
	__KB_TREE_T(name) \
	__KB_INIT(name, key_t) \
	__KB_GET_AUX1(name, key_t, __cmp) \
	__KB_GET(name, key_t) \
	__KB_INTERVAL(name, key_t) \
	__KB_PUT(name, key_t, __cmp) \
	__KB_DEL(name, key_t) \
	__KB_ITR(name, key_t)

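/* Illustrative usage sketch (added commentary, not part of the original
 * header). The tree name `u32`, the key type and the variable names are
 * example choices; kb_generic_cmp works for any type that supports `<`.
 * kb_putp/kb_put always insert, so an existing key is checked first to
 * avoid duplicates.
 *
 *   #include <stdio.h>
 *   #include "kbtree.h"
 *
 *   KBTREE_INIT(u32, uint32_t, kb_generic_cmp)  // declares kbtree_u32_t and its functions
 *
 *   int main(void)
 *   {
 *       kbtree_t(u32) *h = kb_init(u32, KB_DEFAULT_SIZE);
 *       uint32_t k = 42, *p;
 *       if (kb_getp(u32, h, &k) == 0) kb_putp(u32, h, &k); // insert if absent
 *       p = kb_get(u32, h, 42);                            // query; 0 if not found
 *       if (p) printf("%u is in the tree\n", *p);
 *       kb_destroy(u32, h);
 *       return 0;
 *   }
 */
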
#define KB_DEFAULT_SIZE 512

#define kbtree_t(name) kbtree_##name##_t
#define kb_init(name, s) kb_init_##name(s)
#define kb_destroy(name, b) __kb_destroy(b)
#define kb_get(name, b, k) kb_get_##name(b, k)
#define kb_put(name, b, k) kb_put_##name(b, k)
#define kb_del(name, b, k) kb_del_##name(b, k)
#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
#define kb_getp(name, b, k) kb_getp_##name(b, k)
#define kb_putp(name, b, k) kb_putp_##name(b, k)
#define kb_delp(name, b, k) kb_delp_##name(b, k)
#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)

#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
#define kb_itr_key(type, itr) __KB_KEY(type, (itr)->p->x)[(itr)->p->i]
#define kb_itr_valid(itr) ((itr)->p >= (itr)->stack)

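/* Illustrative iterator sketch (added commentary, not part of the original
 * header); assumes a tree `h` instantiated as KBTREE_INIT(u32, uint32_t,
 * kb_generic_cmp) as in the example above.
 *
 *   kbitr_t itr;
 *   kb_itr_first(u32, h, &itr);                    // position at the smallest key
 *   for (; kb_itr_valid(&itr); kb_itr_next(u32, h, &itr)) {
 *       uint32_t key = kb_itr_key(uint32_t, &itr); // keys come out in sorted order
 *       printf("%u\n", key);
 *   }
 */
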
#define kb_size(b) ((b)->n_keys)

#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
#define kb_str_cmp(a, b) strcmp(a, b)

/* The following is *DEPRECATED*!!! Use the iterator interface instead! */

typedef struct {
	kbnode_t *x;
	int i;
} __kbstack_t;

#define __kb_traverse(key_t, b, __func) do { \
		int __kmax = 8; \
		__kbstack_t *__kstack, *__kp; \
		__kp = __kstack = (__kbstack_t*)calloc(__kmax, sizeof(__kbstack_t)); \
		__kp->x = (b)->root; __kp->i = 0; \
		for (;;) { \
			while (__kp->x && __kp->i <= __kp->x->n) { \
				if (__kp - __kstack == __kmax - 1) { \
					__kmax <<= 1; \
					__kstack = (__kbstack_t*)realloc(__kstack, __kmax * sizeof(__kbstack_t)); \
					__kp = __kstack + (__kmax>>1) - 1; \
				} \
				(__kp+1)->i = 0; (__kp+1)->x = __kp->x->is_internal? __KB_PTR(b, __kp->x)[__kp->i] : 0; \
				++__kp; \
			} \
			--__kp; \
			if (__kp >= __kstack) { \
				if (__kp->x && __kp->i < __kp->x->n) __func(&__KB_KEY(key_t, __kp->x)[__kp->i]); \
				++__kp->i; \
			} else break; \
		} \
		free(__kstack); \
	} while (0)

#define __kb_get_first(key_t, b, ret) do { \
		kbnode_t *__x = (b)->root; \
		while (__KB_PTR(b, __x)[0] != 0) \
			__x = __KB_PTR(b, __x)[0]; \
		(ret) = __KB_KEY(key_t, __x)[0]; \
	} while (0)

#endif