/*
   This file is part of the software library CADLIB written by Conrad Ziesler
   Copyright 2003, Conrad Ziesler, all rights reserved.

   *************************
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* sort.c, sorting routines

   This code is adapted from GNU glibc qsort.c, so its header follows below.
   Reason: the stdlib qsort() interface does not allow for indirection in the
   comparison function without resorting to hackish global variables.  Also,
   we already rely heavily on list, so this makes the interface cleaner.
*/
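/* Usage sketch (illustrative only: the record type, its key field, and the
   exact callback signature are assumed here; only sf->cmpf, sf->tosort and
   its q/s/d fields appear in the code below).  The comparison callback gets
   the sort_func_t itself, so per-sort context travels in that structure
   instead of in a global variable:

     static int cmp_by_key (sort_func_t *sf, void *a, void *b)
     {
       const myrec_t *ra = a, *rb = b;            // myrec_t is hypothetical
       return (ra->key > rb->key) - (ra->key < rb->key);
     }
     ...
     sf->cmpf   = cmp_by_key;     // indirection without global state
     sf->tosort = &mylist;        // q records of s bytes each, at d
     list_qsort (sf);
*/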
/* Copyright (C) 1991, 1992, 1996, 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Written by Douglas C. Schmidt (schmidt@ics.uci.edu).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include <stdlib.h>   /* malloc, free */
#include <string.h>   /* memcpy, memset */
#include <assert.h>

#include "list_search.h"
/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size)                                              \
  do {                                                                \
      register size_t __size = (size);                                \
      register char *__a = (a), *__b = (b);                           \
      do { char __tmp = *__a; *__a++ = *__b; *__b++ = __tmp; }        \
      while (--__size > 0);                                           \
  } while (0)
/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4
/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct { char *lo; char *hi; } stack_node;
/* The next 4 #defines implement a very fast in-line stack abstraction. */
#define STACK_SIZE      (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high)  ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
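/* Usage sketch of the stack abstraction above (it mirrors how the two sort
   routines below drive it).  stack[0] is a dummy entry: popping it drops
   `top' back to `stack', so STACK_NOT_EMPTY goes false and the loop ends.

     stack_node stack[STACK_SIZE];
     stack_node *top = stack + 1;   // reserve stack[0] as the dummy entry
     while (STACK_NOT_EMPTY)
       {
         ...                        // partition the current [lo, hi]
         PUSH (lo, right_ptr);      // defer the larger half, or ...
         POP (lo, hi);              // ... resume a deferred partition
       }
*/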
/* Order size using quicksort.  This implementation incorporates
   four optimizations discussed in Sedgewick:

   1. Non-recursive, using an explicit stack of pointers that store the
      next array partition to sort.  To save time, this maximum amount
      of space required to store an array of MAX_INT is allocated on the
      stack.  Assuming a 32-bit integer, this needs only
      32 * sizeof(stack_node) == 256 bytes.  Pretty cheap, actually.

   2. Choose the pivot element using a median-of-three decision tree.
      This reduces the probability of selecting a bad pivot value and
      eliminates certain extraneous comparisons.

   3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
      insertion sort to order the MAX_THRESH items within each partition.
      This is a big win, since insertion sort is faster for small, mostly
      sorted array segments.

   4. The larger of the two sub-partitions is always pushed onto the
      stack first, with the algorithm then concentrating on the
      smaller partition.  This *guarantees* no more than log (n)
      stack size is needed (actually O(1) in this case)! */
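/* A worked instance of point 4 (a sketch, assuming 32-bit pointers): every
   deferred partition is at most half the size of its parent, so no more
   than log2(n) <= 32 partitions are ever outstanding at once.  The fixed
   stack of STACK_SIZE == 32 entries of sizeof(stack_node) == 8 bytes each,
   i.e. 256 bytes, therefore cannot overflow. */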
#define CMP(a,b) (sf->cmpf(sf,a,b))
void list_qsort (sort_func_t *sf)
{
  size_t total_elems = sf->tosort->q;
  size_t size = sf->tosort->s;
  void *const pbase = sf->tosort->d;
  register char *base_ptr = (char *) pbase;

  /* Allocating SIZE bytes for a pivot buffer facilitates a better
     algorithm below since we can do comparisons directly on the pivot. */
  char *pivot_buffer = (char *) malloc (size);
  const size_t max_thresh = MAX_THRESH * size;

  assert(pivot_buffer != NULL);
  memset(pivot_buffer, 0, size);

  if (total_elems == 0)
    /* Avoid lossage with unsigned arithmetic below. */
    { free(pivot_buffer); return; }
  if (total_elems > MAX_THRESH)
    {
      char *lo = base_ptr;
      char *hi = &lo[size * (total_elems - 1)];
      /* Largest size needed for 32-bit int!!! */
      stack_node stack[STACK_SIZE];
      stack_node *top = stack + 1;

      while (STACK_NOT_EMPTY)
        {
          char *left_ptr;
          char *right_ptr;
          char *pivot = pivot_buffer;
          /* Select median value from among LO, MID, and HI.  Rearrange
             LO and HI so the three values are sorted.  This lowers the
             probability of picking a pathological pivot value and
             skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
          char *mid = lo + size * ((hi - lo) / size >> 1);

          if (CMP ((void *) mid, (void *) lo) < 0)
            SWAP (mid, lo, size);
          if (CMP ((void *) hi, (void *) mid) < 0)
            SWAP (mid, hi, size);
          else
            goto jump_over;
          if (CMP ((void *) mid, (void *) lo) < 0)
            SWAP (mid, lo, size);
        jump_over:;

          memcpy (pivot, mid, size);
          pivot = pivot_buffer;
          left_ptr  = lo + size;
          right_ptr = hi - size;

          /* Here's the famous ``collapse the walls'' section of quicksort.
             Gotta like those tight inner loops!  They are the main reason
             that this algorithm runs much faster than others. */
          do
            {
              while (CMP ((void *) left_ptr, (void *) pivot) < 0)
                left_ptr += size;

              while (CMP ((void *) pivot, (void *) right_ptr) < 0)
                right_ptr -= size;

              if (left_ptr < right_ptr)
                {
                  SWAP (left_ptr, right_ptr, size);
                  left_ptr += size;
                  right_ptr -= size;
                }
              else if (left_ptr == right_ptr)
                {
                  left_ptr += size;
                  right_ptr -= size;
                  break;
                }
            }
          while (left_ptr <= right_ptr);
          /* Set up pointers for next iteration.  First determine whether
             left and right partitions are below the threshold size.  If so,
             ignore one or both.  Otherwise, push the larger partition's
             bounds on the stack and continue sorting the smaller one. */

          if ((size_t) (right_ptr - lo) <= max_thresh)
            {
              if ((size_t) (hi - left_ptr) <= max_thresh)
                /* Ignore both small partitions. */
                POP (lo, hi);
              else
                /* Ignore small left partition. */
                lo = left_ptr;
            }
          else if ((size_t) (hi - left_ptr) <= max_thresh)
            /* Ignore small right partition. */
            hi = right_ptr;
          else if ((right_ptr - lo) > (hi - left_ptr))
            {
              /* Push larger left partition indices. */
              PUSH (lo, right_ptr);
              lo = left_ptr;
            }
          else
            {
              /* Push larger right partition indices. */
              PUSH (left_ptr, hi);
              hi = right_ptr;
            }
        }
    }
  /* Once the BASE_PTR array is partially sorted by quicksort the rest
     is completely sorted using insertion sort, since this is efficient
     for partitions below MAX_THRESH size.  BASE_PTR points to the beginning
     of the array to sort, and END_PTR points at the very last element in
     the array (*not* one beyond it!). */
#define min(x, y) ((x) < (y) ? (x) : (y))
  {
    char *const end_ptr = &base_ptr[size * (total_elems - 1)];
    char *tmp_ptr = base_ptr;
    char *thresh = min(end_ptr, base_ptr + max_thresh);
    register char *run_ptr;

    /* Find smallest element in first threshold and place it at the
       array's beginning.  This is the smallest array element,
       and the operation speeds up insertion sort's inner loop. */

    for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
      if (CMP ((void *) run_ptr, (void *) tmp_ptr) < 0)
        tmp_ptr = run_ptr;

    if (tmp_ptr != base_ptr)
      SWAP (tmp_ptr, base_ptr, size);

    /* Insertion sort, running from left-hand-side up to right-hand-side. */

    run_ptr = base_ptr + size;
    while ((run_ptr += size) <= end_ptr)
      {
        tmp_ptr = run_ptr - size;
        while (CMP ((void *) run_ptr, (void *) tmp_ptr) < 0)
          tmp_ptr -= size;

        tmp_ptr += size;
        if (tmp_ptr != run_ptr)
          {
            char *trav;

            trav = run_ptr + size;
            while (--trav >= run_ptr)
              {
                char c = *trav;
                char *hi, *lo;

                for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
                  *hi = *lo;
                *hi = c;
              }
          }
      }
  }

  { free(pivot_buffer); return; }
}
/**** support for search list bindings.  We copy and modify the qsort code
      to reduce the overhead of the extra bindings.  -- conrad
 ****/
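/* Illustrative sketch of the index-array scheme used below (only
   psp->isearchdata, psp->user and the list fields come from the code; the
   rest is hypothetical).  Rather than moving s-byte records, we sort an
   array of int indices, and CMPIND dereferences each index into the list
   data before handing both records to the comparison callback:

     int *idx = psp->isearchdata;          // idx[i] selects record i
     list_search_qsort (lsp, psp);
     // afterwards source_d + source_s*idx[0] is the smallest record,
     // source_d + source_s*idx[1] the next, and so on; the records in
     // lsp->list themselves never move.
*/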
/* swap optimized for integers */
#define SWAPINT(a, b)                                                 \
  do {                                                                \
      register int *__a = (void *)(a), *__b = (void *)(b), __tmp;     \
      __tmp = *__a; *__a = *__b; *__b = __tmp;                        \
  } while (0)
#define CMPIND(a,b) (psp_cmpf( psp->user, source_d+(source_s*(*((int*)a))), source_d+(source_s*(*((int*)b)))))
#define size (sizeof(int))
void list_search_qsort (list_search_t *lsp, list_psearch_t *psp)
{
  int (*psp_cmpf)(const void *user, const void *a, const void *b); /* caller's compare callback, invoked via CMPIND */
  size_t total_elems = lsp->list.q;
  register char *source_d;
  register size_t source_s;
  void *const pbase = psp->isearchdata;
  register char *base_ptr;
  int pivot_buffer_el = 0;
  char *pivot_buffer = (char *) &pivot_buffer_el;
  const size_t max_thresh = MAX_THRESH * size;

  base_ptr = (char *) pbase;
  /* copy out of the pointers into stack variables so the compiler can do
     memory scheduling */
  source_d = lsp->list.d;
  source_s = lsp->list.s;

  if (total_elems == 0) return;
  if (total_elems > MAX_THRESH)
    {
      char *lo = base_ptr;
      char *hi = &lo[size * (total_elems - 1)];
      /* Largest size needed for 32-bit int!!! */
      stack_node stack[STACK_SIZE];
      stack_node *top = stack + 1;

      while (STACK_NOT_EMPTY)
        {
          char *left_ptr;
          char *right_ptr;
          char *pivot = pivot_buffer;
          /* Select median value from among LO, MID, and HI.  Rearrange
             LO and HI so the three values are sorted.  This lowers the
             probability of picking a pathological pivot value and
             skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
          char *mid = lo + size * ((hi - lo) / size >> 1);

          if (CMPIND ((void *) mid, (void *) lo) < 0)
            SWAPINT (mid, lo);
          if (CMPIND ((void *) hi, (void *) mid) < 0)
            SWAPINT (mid, hi);
          else
            goto jump_over;
          if (CMPIND ((void *) mid, (void *) lo) < 0)
            SWAPINT (mid, lo);
        jump_over:;

          (*((int*)pivot)) = (*((int*)mid));
          pivot = pivot_buffer;
          left_ptr  = lo + size;
          right_ptr = hi - size;

          /* Here's the famous ``collapse the walls'' section of quicksort.
             Gotta like those tight inner loops!  They are the main reason
             that this algorithm runs much faster than others. */
          do
            {
              while (CMPIND ((void *) left_ptr, (void *) pivot) < 0)
                left_ptr += size;

              while (CMPIND ((void *) pivot, (void *) right_ptr) < 0)
                right_ptr -= size;

              if (left_ptr < right_ptr)
                {
                  SWAPINT (left_ptr, right_ptr);
                  left_ptr += size;
                  right_ptr -= size;
                }
              else if (left_ptr == right_ptr)
                {
                  left_ptr += size;
                  right_ptr -= size;
                  break;
                }
            }
          while (left_ptr <= right_ptr);
          /* Set up pointers for next iteration.  First determine whether
             left and right partitions are below the threshold size.  If so,
             ignore one or both.  Otherwise, push the larger partition's
             bounds on the stack and continue sorting the smaller one. */

          if ((size_t) (right_ptr - lo) <= max_thresh)
            {
              if ((size_t) (hi - left_ptr) <= max_thresh)
                /* Ignore both small partitions. */
                POP (lo, hi);
              else
                /* Ignore small left partition. */
                lo = left_ptr;
            }
          else if ((size_t) (hi - left_ptr) <= max_thresh)
            /* Ignore small right partition. */
            hi = right_ptr;
          else if ((right_ptr - lo) > (hi - left_ptr))
            {
              /* Push larger left partition indices. */
              PUSH (lo, right_ptr);
              lo = left_ptr;
            }
          else
            {
              /* Push larger right partition indices. */
              PUSH (left_ptr, hi);
              hi = right_ptr;
            }
        }
    }
  /* Once the BASE_PTR array is partially sorted by quicksort the rest
     is completely sorted using insertion sort, since this is efficient
     for partitions below MAX_THRESH size.  BASE_PTR points to the beginning
     of the array to sort, and END_PTR points at the very last element in
     the array (*not* one beyond it!). */
#define min(x, y) ((x) < (y) ? (x) : (y))
  {
    char *const end_ptr = &base_ptr[size * (total_elems - 1)];
    char *tmp_ptr = base_ptr;
    char *thresh = min(end_ptr, base_ptr + max_thresh);
    register char *run_ptr;

    /* Find smallest element in first threshold and place it at the
       array's beginning.  This is the smallest array element,
       and the operation speeds up insertion sort's inner loop. */

    for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
      if (CMPIND ((void *) run_ptr, (void *) tmp_ptr) < 0)
        tmp_ptr = run_ptr;

    if (tmp_ptr != base_ptr)
      SWAPINT (tmp_ptr, base_ptr);

    /* Insertion sort, running from left-hand-side up to right-hand-side. */

    run_ptr = base_ptr + size;
    while ((run_ptr += size) <= end_ptr)
      {
        tmp_ptr = run_ptr - size;
        while (CMPIND ((void *) run_ptr, (void *) tmp_ptr) < 0)
          tmp_ptr -= size;

        tmp_ptr += size;
        if (tmp_ptr != run_ptr)
          {
            char *trav;

            trav = run_ptr + size;
            while (--trav >= run_ptr)
              {
                char c = *trav;
                char *hi, *lo;

                for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
                  *hi = *lo;
                *hi = c;
              }
          }
      }
  }
}