/*-------------------------------------------------------------------------
 *
 * Generalized tuple sorting routines.
 *
 * This module handles sorting of heap tuples, index tuples, or single
 * Datums (and could easily support other kinds of sortable objects,
 * if necessary).  It works efficiently for both small and large amounts
 * of data.  Small amounts are sorted in-memory using qsort().  Large
 * amounts are sorted using temporary files and a standard external sort
 * algorithm.
 *
 * See Knuth, volume 3, for more than you want to know about the external
 * sorting algorithm.  We divide the input into sorted runs using replacement
 * selection, in the form of a priority tree implemented as a heap
 * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
 * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
 * are implemented by logtape.c, which avoids space wastage by recycling
 * disk space as soon as each block is read from its "tape".
 *
 * We do not form the initial runs using Knuth's recommended replacement
 * selection data structure (Algorithm 5.4.1R), because it uses a fixed
 * number of records in memory at all times.  Since we are dealing with
 * tuples that may vary considerably in size, we want to be able to vary
 * the number of records kept in memory to ensure full utilization of the
 * allowed sort memory space.  So, we keep the tuples in a variable-size
 * heap, with the next record to go out at the top of the heap.  Like
 * Algorithm 5.4.1R, each record is stored with the run number that it
 * must go into, and we use (run number, key) as the ordering key for the
 * heap.  When the run number at the top of the heap changes, we know that
 * no more records of the prior run are left in the heap.
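 *
 * As an illustrative sketch (added here, not a quotation of the actual
 * comparison routine): while initial runs are being built, the heap can be
 * thought of as ordering its entries with a comparison along these lines,
 * where tupindex holds the run number:
 *
 *		if (a->tupindex != b->tupindex)
 *			return (a->tupindex < b->tupindex) ? -1 : 1;
 *		return COMPARETUP(state, a, b);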
 *
 * The approximate amount of memory allowed for any one sort operation
 * is specified in kilobytes by the caller (most pass work_mem).  Initially,
 * we absorb tuples and simply store them in an unsorted array as long as
 * we haven't exceeded workMem.  If we reach the end of the input without
 * exceeding workMem, we sort the array using qsort() and subsequently return
 * tuples just by scanning the tuple array sequentially.  If we do exceed
 * workMem, we construct a heap using Algorithm H and begin to emit tuples
 * into sorted runs in temporary tapes, emitting just enough tuples at each
 * step to get back within the workMem limit.  Whenever the run number at
 * the top of the heap changes, we begin a new run with a new output tape
 * (selected per Algorithm D).  After the end of the input is reached,
 * we dump out remaining tuples in memory into a final run (or two),
 * then merge the runs using Algorithm D.
 *
 * When merging runs, we use a heap containing just the frontmost tuple from
 * each source run; we repeatedly output the smallest tuple and insert the
 * next tuple from its source tape (if any).  When the heap empties, the merge
 * is complete.  The basic merge algorithm thus needs very little memory ---
 * only M tuples for an M-way merge, and M is constrained to a small number.
 * However, we can still make good use of our full workMem allocation by
 * pre-reading additional tuples from each source tape.  Without prereading,
 * our access pattern to the temporary file would be very erratic; on average
 * we'd read one block from each of M source tapes during the same time that
 * we're writing M blocks to the output tape, so there is no sequentiality of
 * access at all, defeating the read-ahead methods used by most Unix kernels.
 * Worse, the output tape gets written into a very random sequence of blocks
 * of the temp file, ensuring that things will be even worse when it comes
 * time to read that tape.  A straightforward merge pass thus ends up doing a
 * lot of waiting for disk seeks.  We can improve matters by prereading from
 * each source tape sequentially, loading about workMem/M bytes from each tape
 * in turn.  Then we run the merge algorithm, writing but not reading until
 * one of the preloaded tuple series runs out.  Then we switch back to preread
 * mode, fill memory again, and repeat.  This approach helps to localize both
 * read and write accesses.
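 *
 * As a rough illustrative example (not a measured figure): with workMem set
 * to 8192kB and a 7-way merge, each preread cycle loads about 8192kB / 7,
 * a bit over 1MB, sequentially from one tape before moving to the next.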
 *
 * When the caller requests random access to the sort result, we form
 * the final sorted run on a logical tape which is then "frozen", so
 * that we can access it randomly.  When the caller does not need random
 * access, we return from tuplesort_performsort() as soon as we are down
 * to one run per logical tape.  The final merge is then performed
 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
 * saves one cycle of writing all the data out to disk and reading it in.
 *
 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
 * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
 * tape drives are expensive beasts, and in particular that there will always
 * be many more runs than tape drives.  In our implementation a "tape drive"
 * doesn't cost much more than a few Kb of memory buffers, so we can afford
 * to have lots of them.  In particular, if we can have as many tape drives
 * as sorted runs, we can eliminate any repeated I/O at all.  In the current
 * code we determine the number of tapes M on the basis of workMem: we want
 * workMem/M to be large enough that we read a fair amount of data each time
 * we preread from a tape, so as to maintain the locality of access described
 * above.  Nonetheless, with large workMem we can have many tapes.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#include "access/genam.h"
#include "access/nbtree.h"
#include "catalog/pg_amop.h"
#include "catalog/pg_operator.h"
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
bool	trace_sort = false;

#ifdef DEBUG_BOUNDED_SORT
bool	optimize_bounded_sort = true;
#endif
/*
 * The objects we actually sort are SortTuple structs.  These contain
 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
 * which is a separate palloc chunk --- we assume it is just one chunk and
 * can be freed by a simple pfree().  SortTuples also contain the tuple's
 * first key column in Datum/nullflag format, and an index integer.
 *
 * Storing the first key column lets us save heap_getattr or index_getattr
 * calls during tuple comparisons.  We could extract and save all the key
 * columns not just the first, but this would increase code complexity and
 * overhead, and wouldn't actually save any comparison cycles in the common
 * case where the first key determines the comparison result.  Note that
 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
 *
 * When sorting single Datums, the data value is represented directly by
 * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
 * then datum1 points to a separately palloc'd data value that is also pointed
 * to by the "tuple" pointer; otherwise "tuple" is NULL.
 *
 * While building initial runs, tupindex holds the tuple's run number.  During
 * merge passes, we re-use it to hold the input tape number that each tuple in
 * the heap was read from, or to hold the index of the next tuple pre-read
 * from the same tape in the case of pre-read entries.  tupindex goes unused
 * if the sort occurs entirely in memory.
 */
typedef struct
{
	void	   *tuple;			/* the tuple proper */
	Datum		datum1;			/* value of first key column */
	bool		isnull1;		/* is first key column NULL? */
	int			tupindex;		/* see notes above */
} SortTuple;
/*
 * Possible states of a Tuplesort object.  These denote the states that
 * persist between calls of Tuplesort routines.
 */
typedef enum
{
	TSS_INITIAL,				/* Loading tuples; still within memory limit */
	TSS_BOUNDED,				/* Loading tuples into bounded-size heap */
	TSS_BUILDRUNS,				/* Loading tuples; writing to tape */
	TSS_SORTEDINMEM,			/* Sort completed entirely in memory */
	TSS_SORTEDONTAPE,			/* Sort completed, final run is on tape */
	TSS_FINALMERGE				/* Performing final merge on-the-fly */
} TupSortStatus;
/*
 * Parameters for calculation of number of tapes to use --- see inittapes()
 * and tuplesort_merge_order().
 *
 * In this calculation we assume that each tape will cost us about 3 blocks
 * worth of buffer space (which is an underestimate for very large data
 * volumes, but it's probably close enough --- see logtape.c).
 *
 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
 * tape during a preread cycle (see discussion at top of file).
 */
#define MINORDER		6		/* minimum merge order */
#define TAPE_BUFFER_OVERHEAD		(BLCKSZ * 3)
#define MERGE_BUFFER_SIZE			(BLCKSZ * 32)
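
/*
 * For reference (an illustrative note, assuming the default BLCKSZ of 8192
 * bytes): TAPE_BUFFER_OVERHEAD works out to 24kB of buffer space per tape,
 * and MERGE_BUFFER_SIZE to 256kB of preread workspace per input tape.
 */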
/*
 * Private state of a Tuplesort operation.
 */
struct Tuplesortstate
{
	TupSortStatus status;		/* enumerated value as shown above */
	int			nKeys;			/* number of columns in sort key */
	bool		randomAccess;	/* did caller request random access? */
	bool		bounded;		/* did caller specify a maximum number of
								 * tuples to return? */
	bool		boundUsed;		/* true if we made use of a bounded heap */
	int			bound;			/* if bounded, the maximum number of tuples */
	long		availMem;		/* remaining memory available, in bytes */
	long		allowedMem;		/* total memory allowed, in bytes */
	int			maxTapes;		/* number of tapes (Knuth's T) */
	int			tapeRange;		/* maxTapes-1 (Knuth's P) */
	MemoryContext sortcontext;	/* memory context holding all sort data */
	LogicalTapeSet *tapeset;	/* logtape.c object for tapes in a temp file */
	/*
	 * These function pointers decouple the routines that must know what kind
	 * of tuple we are sorting from the routines that don't need to know it.
	 * They are set up by the tuplesort_begin_xxx routines.
	 *
	 * Function to compare two tuples; result is per qsort() convention, ie:
	 * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
	 * qsort_arg_comparator.
	 */
	int			(*comparetup) (const SortTuple *a, const SortTuple *b,
							   Tuplesortstate *state);

	/*
	 * Function to copy a supplied input tuple into palloc'd space and set up
	 * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
	 * state->availMem must be decreased by the amount of space used for the
	 * tuple copy (note the SortTuple struct itself is not counted).
	 */
	void		(*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);

	/*
	 * Function to write a stored tuple onto tape.  The representation of the
	 * tuple on tape need not be the same as it is in memory; requirements on
	 * the tape representation are given below.  After writing the tuple,
	 * pfree() the out-of-line data (not the SortTuple struct!), and increase
	 * state->availMem by the amount of memory space thereby released.
	 */
	void		(*writetup) (Tuplesortstate *state, int tapenum,
							 SortTuple *stup);

	/*
	 * Function to read a stored tuple from tape back into memory.  'len' is
	 * the already-read length of the stored tuple.  Create a palloc'd copy,
	 * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
	 * decrease state->availMem by the amount of memory space consumed.
	 */
	void		(*readtup) (Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);

	/*
	 * Function to reverse the sort direction from its current state.  (We
	 * could dispense with this if we wanted to enforce that all variants
	 * represent the sort key information alike.)
	 */
	void		(*reversedirection) (Tuplesortstate *state);
	/*
	 * This array holds the tuples now in sort memory.  If we are in state
	 * INITIAL, the tuples are in no particular order; if we are in state
	 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
	 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
	 * H.  (Note that memtupcount only counts the tuples that are part of the
	 * heap --- during merge passes, memtuples[] entries beyond tapeRange are
	 * never in the heap and are used to hold pre-read tuples.)  In state
	 * SORTEDONTAPE, the array is not used.
	 */
	SortTuple  *memtuples;		/* array of SortTuple structs */
	int			memtupcount;	/* number of tuples currently present */
	int			memtupsize;		/* allocated length of memtuples array */
	/*
	 * While building initial runs, this is the current output run number
	 * (starting at 0).  Afterwards, it is the number of initial runs we made.
	 */
	int			currentRun;

	/*
	 * Unless otherwise noted, all pointer variables below are pointers to
	 * arrays of length maxTapes, holding per-tape data.
	 */

	/*
	 * These variables are only used during merge passes.  mergeactive[i] is
	 * true if we are reading an input run from (actual) tape number i and
	 * have not yet exhausted that run.  mergenext[i] is the memtuples index
	 * of the next pre-read tuple (next to be loaded into the heap) for tape
	 * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
	 * points to the last pre-read tuple from each tape.  mergeavailslots[i]
	 * is the number of unused memtuples[] slots reserved for tape i, and
	 * mergeavailmem[i] is the amount of unused space allocated for tape i.
	 * mergefreelist and mergefirstfree keep track of unused locations in the
	 * memtuples[] array.  The memtuples[].tupindex fields link together
	 * pre-read tuples for each tape as well as recycled locations in
	 * mergefreelist.  It is OK to use 0 as a null link in these lists, because
	 * memtuples[0] is part of the merge heap and is never a pre-read tuple.
	 */
	bool	   *mergeactive;	/* active input run source? */
	int		   *mergenext;		/* first preread tuple for each source */
	int		   *mergelast;		/* last preread tuple for each source */
	int		   *mergeavailslots;	/* slots left for prereading each tape */
	long	   *mergeavailmem;	/* availMem for prereading each tape */
	int			mergefreelist;	/* head of freelist of recycled slots */
	int			mergefirstfree; /* first slot never used in this merge */
	/*
	 * Variables for Algorithm D.  Note that destTape is a "logical" tape
	 * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
	 * "logical" and "actual" tape numbers straight!
	 */
	int			Level;			/* Knuth's l */
	int			destTape;		/* current output tape (Knuth's j, less 1) */
	int		   *tp_fib;			/* Target Fibonacci run counts (A[]) */
	int		   *tp_runs;		/* # of real runs on each tape */
	int		   *tp_dummy;		/* # of dummy runs for each tape (D[]) */
	int		   *tp_tapenum;		/* Actual tape numbers (TAPE[]) */
	int			activeTapes;	/* # of active input tapes in merge pass */
	/*
	 * These variables are used after completion of sorting to keep track of
	 * the next tuple to return.  (In the tape case, the tape's current read
	 * position is also critical state.)
	 */
	int			result_tape;	/* actual tape number of finished output */
	int			current;		/* array index (only used if SORTEDINMEM) */
	bool		eof_reached;	/* reached EOF (needed for cursors) */

	/* markpos_xxx holds marked position for mark and restore */
	long		markpos_block;	/* tape block# (only used if SORTEDONTAPE) */
	int			markpos_offset; /* saved "current", or offset in tape block */
	bool		markpos_eof;	/* saved "eof_reached" */
	/*
	 * These variables are specific to the MinimalTuple case; they are set by
	 * tuplesort_begin_heap and used only by the MinimalTuple routines.
	 */
	TupleDesc	tupDesc;
	ScanKey		scanKeys;		/* array of length nKeys */

	/*
	 * These variables are specific to the IndexTuple case; they are set by
	 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
	 */
	Relation	indexRel;		/* index being built */

	/* These are specific to the index_btree subcase: */
	ScanKey		indexScanKey;
	bool		enforceUnique;	/* complain if we find duplicate tuples */

	/* These are specific to the index_hash subcase: */
	uint32		hash_mask;		/* mask for sortable part of hash code */

	/*
	 * These variables are specific to the Datum case; they are set by
	 * tuplesort_begin_datum and used only by the DatumTuple routines.
	 */
	Oid			datumType;
	FmgrInfo	sortOpFn;		/* cached lookup data for sortOperator */
	int			sortFnFlags;	/* equivalent to sk_flags */
	/* we need typelen and byval in order to know how to copy the Datums. */
	int			datumTypeLen;
	bool		datumTypeByVal;

	/*
	 * Resource snapshot for time of sort start.
	 */
	PGRUsage	ru_start;
};
#define COMPARETUP(state,a,b)	((*(state)->comparetup) (a, b, state))
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)	((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define REVERSEDIRECTION(state) ((*(state)->reversedirection) (state))
#define LACKMEM(state)		((state)->availMem < 0)
#define USEMEM(state,amt)	((state)->availMem -= (amt))
#define FREEMEM(state,amt)	((state)->availMem += (amt))
/*
 * NOTES about on-tape representation of tuples:
 *
 * We require the first "unsigned int" of a stored tuple to be the total size
 * on-tape of the tuple, including itself (so it is never zero; an all-zero
 * unsigned int is used to delimit runs).  The remainder of the stored tuple
 * may or may not match the in-memory representation of the tuple ---
 * any conversion needed is the job of the writetup and readtup routines.
 *
 * If state->randomAccess is true, then the stored representation of the
 * tuple must be followed by another "unsigned int" that is a copy of the
 * length --- so the total tape space used is actually sizeof(unsigned int)
 * more than the stored length value.  This allows read-backwards.  When
 * randomAccess is not true, the write/read routines may omit the extra
 * length word.
 *
 * writetup is expected to write both length words as well as the tuple
 * data.  When readtup is called, the tape is positioned just after the
 * front length word; readtup must read the tuple data and advance past
 * the back length word (if present).
 *
 * The write/read routines can make use of the tuple description data
 * stored in the Tuplesortstate record, if needed.  They are also expected
 * to adjust state->availMem by the amount of memory space (not tape space!)
 * released or consumed.  There is no error return from either writetup
 * or readtup; they should ereport() on failure.
 */
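
/*
 * As a rough sketch of the on-tape layout rules above (an illustration, not
 * a copy of any particular writetup routine; "data" and "datalen" stand for
 * the tuple body being stored), a writetup implementation might do:
 *
 *		unsigned int tuplen = datalen + sizeof(unsigned int);
 *
 *		LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *		LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
 *		if (state->randomAccess)
 *			LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *
 * i.e. a leading length word that counts itself, the tuple body, and an
 * optional trailing copy of the length word to support reading backwards.
 */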
/*
 * NOTES about memory consumption calculations:
 *
 * We count space allocated for tuples against the workMem limit, plus
 * the space used by the variable-size memtuples array.  Fixed-size space
 * is not counted; it's small enough to not be interesting.
 *
 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
 * rather than the originally-requested size.  This is important since
 * palloc can add substantial overhead.  It's not a complete answer since
 * we won't count any wasted space in palloc allocation blocks, but it's
 * a lot better than what we were doing before 7.3.
 */
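
/*
 * Illustrative sketch (not taken verbatim from any copytup routine): the
 * intended pairing is that whatever is palloc'd on behalf of a tuple is
 * charged against availMem at its actual chunk size, and credited back when
 * the tuple is written out or freed:
 *
 *		tup = palloc(len);
 *		USEMEM(state, GetMemoryChunkSpace(tup));
 *		...
 *		FREEMEM(state, GetMemoryChunkSpace(tup));
 *		pfree(tup);
 */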
static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
static void inittapes(Tuplesortstate *state);
static void selectnewtape(Tuplesortstate *state);
static void mergeruns(Tuplesortstate *state);
static void mergeonerun(Tuplesortstate *state);
static void beginmerge(Tuplesortstate *state);
static void mergepreread(Tuplesortstate *state);
static void mergeprereadone(Tuplesortstate *state, int srcTape);
static void dumptuples(Tuplesortstate *state, bool alltuples);
static void make_bounded_heap(Tuplesortstate *state);
static void sort_bounded_heap(Tuplesortstate *state);
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					  int tupleindex, bool checkIndex);
static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int	comparetup_heap(const SortTuple *a, const SortTuple *b,
				Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
			  SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
			 int tapenum, unsigned int len);
static void reversedirection_heap(Tuplesortstate *state);
static int	comparetup_index_btree(const SortTuple *a, const SortTuple *b,
						Tuplesortstate *state);
static int	comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_index_btree(Tuplesortstate *state);
static void reversedirection_index_hash(Tuplesortstate *state);
static int	comparetup_datum(const SortTuple *a, const SortTuple *b,
				 Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_datum(Tuplesortstate *state);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
/*
 *		tuplesort_begin_xxx
 *
 * Initialize for a tuple sort operation.
 *
 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
 * zero or more times, then call tuplesort_performsort when all the tuples
 * have been supplied.  After performsort, retrieve the tuples in sorted
 * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
 * access was requested, rescan, markpos, and restorepos can also be called.)
 * Call tuplesort_end to terminate the operation and release memory/disk space.
 *
 * Each variant of tuplesort_begin has a workMem parameter specifying the
 * maximum number of kilobytes of RAM to use before spilling data to disk.
 * (The normal value of this parameter is work_mem, but some callers use
 * other values.)  Each variant also has a randomAccess parameter specifying
 * whether the caller needs non-sequential access to the sort result.
 */
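
/*
 * For illustration only (this sketch is not part of the original sources):
 * a caller sorting heap tuples typically drives the API roughly like this,
 * assuming tupdesc, nkeys, attNums, sortOperators, nullsFirstFlags, and a
 * TupleTableSlot "slot" have already been set up:
 *
 *		Tuplesortstate *sortstate;
 *
 *		sortstate = tuplesort_begin_heap(tupdesc, nkeys, attNums,
 *										 sortOperators, nullsFirstFlags,
 *										 work_mem, false);
 *		while (... more input tuples ...)
 *			tuplesort_puttupleslot(sortstate, slot);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_gettupleslot(sortstate, true, slot))
 *			... consume the tuple now stored in slot ...;
 *		tuplesort_end(sortstate);
 */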
static Tuplesortstate *
tuplesort_begin_common(int workMem, bool randomAccess)
{
	Tuplesortstate *state;
	MemoryContext sortcontext;
	MemoryContext oldcontext;

	/*
	 * Create a working memory context for this sort operation.  All data
	 * needed by the sort will live inside this context.
	 */
	sortcontext = AllocSetContextCreate(CurrentMemoryContext,
										"TupleSort",
										ALLOCSET_DEFAULT_MINSIZE,
										ALLOCSET_DEFAULT_INITSIZE,
										ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * Make the Tuplesortstate within the per-sort context.  This way, we
	 * don't need a separate pfree() operation for it at shutdown.
	 */
	oldcontext = MemoryContextSwitchTo(sortcontext);

	state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

	pg_rusage_init(&state->ru_start);

	state->status = TSS_INITIAL;
	state->randomAccess = randomAccess;
	state->bounded = false;
	state->boundUsed = false;
	state->allowedMem = workMem * 1024L;
	state->availMem = state->allowedMem;
	state->sortcontext = sortcontext;
	state->tapeset = NULL;

	state->memtupcount = 0;
	state->memtupsize = 1024;	/* initial guess */
	state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

	USEMEM(state, GetMemoryChunkSpace(state->memtuples));

	/* workMem must be large enough for the minimal memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");

	state->currentRun = 0;

	/*
	 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
	 * inittapes(), if needed
	 */

	state->result_tape = -1;	/* flag that result tape has not been formed */

	MemoryContextSwitchTo(oldcontext);

	return state;
}
557 tuplesort_begin_heap(TupleDesc tupDesc
,
558 int nkeys
, AttrNumber
*attNums
,
559 Oid
*sortOperators
, bool *nullsFirstFlags
,
560 int workMem
, bool randomAccess
)
562 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
563 MemoryContext oldcontext
;
566 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
568 AssertArg(nkeys
> 0);
573 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
574 nkeys
, workMem
, randomAccess
? 't' : 'f');
577 TRACE_POSTGRESQL_SORT_START(HEAP_SORT
, false, nkeys
, workMem
, randomAccess
);
579 state
->nKeys
= nkeys
;
581 state
->comparetup
= comparetup_heap
;
582 state
->copytup
= copytup_heap
;
583 state
->writetup
= writetup_heap
;
584 state
->readtup
= readtup_heap
;
585 state
->reversedirection
= reversedirection_heap
;
587 state
->tupDesc
= tupDesc
; /* assume we need not copy tupDesc */
588 state
->scanKeys
= (ScanKey
) palloc0(nkeys
* sizeof(ScanKeyData
));
590 for (i
= 0; i
< nkeys
; i
++)
595 AssertArg(attNums
[i
] != 0);
596 AssertArg(sortOperators
[i
] != 0);
598 if (!get_compare_function_for_ordering_op(sortOperators
[i
],
599 &sortFunction
, &reverse
))
600 elog(ERROR
, "operator %u is not a valid ordering operator",
604 * We needn't fill in sk_strategy or sk_subtype since these scankeys
605 * will never be passed to an index.
607 ScanKeyInit(&state
->scanKeys
[i
],
613 /* However, we use btree's conventions for encoding directionality */
615 state
->scanKeys
[i
].sk_flags
|= SK_BT_DESC
;
616 if (nullsFirstFlags
[i
])
617 state
->scanKeys
[i
].sk_flags
|= SK_BT_NULLS_FIRST
;
620 MemoryContextSwitchTo(oldcontext
);
626 tuplesort_begin_index_btree(Relation indexRel
,
628 int workMem
, bool randomAccess
)
630 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
631 MemoryContext oldcontext
;
633 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
638 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
639 enforceUnique
? 't' : 'f',
640 workMem
, randomAccess
? 't' : 'f');
643 state
->nKeys
= RelationGetNumberOfAttributes(indexRel
);
645 TRACE_POSTGRESQL_SORT_START(INDEX_SORT
, enforceUnique
, state
->nKeys
, workMem
, randomAccess
);
647 state
->comparetup
= comparetup_index_btree
;
648 state
->copytup
= copytup_index
;
649 state
->writetup
= writetup_index
;
650 state
->readtup
= readtup_index
;
651 state
->reversedirection
= reversedirection_index_btree
;
653 state
->indexRel
= indexRel
;
654 state
->indexScanKey
= _bt_mkscankey_nodata(indexRel
);
655 state
->enforceUnique
= enforceUnique
;
657 MemoryContextSwitchTo(oldcontext
);
663 tuplesort_begin_index_hash(Relation indexRel
,
665 int workMem
, bool randomAccess
)
667 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
668 MemoryContext oldcontext
;
670 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
675 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
677 workMem
, randomAccess
? 't' : 'f');
680 state
->nKeys
= 1; /* Only one sort column, the hash code */
682 state
->comparetup
= comparetup_index_hash
;
683 state
->copytup
= copytup_index
;
684 state
->writetup
= writetup_index
;
685 state
->readtup
= readtup_index
;
686 state
->reversedirection
= reversedirection_index_hash
;
688 state
->indexRel
= indexRel
;
690 state
->hash_mask
= hash_mask
;
692 MemoryContextSwitchTo(oldcontext
);
698 tuplesort_begin_datum(Oid datumType
,
699 Oid sortOperator
, bool nullsFirstFlag
,
700 int workMem
, bool randomAccess
)
702 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
703 MemoryContext oldcontext
;
709 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
714 "begin datum sort: workMem = %d, randomAccess = %c",
715 workMem
, randomAccess
? 't' : 'f');
718 TRACE_POSTGRESQL_SORT_START(DATUM_SORT
, false, 1, workMem
, randomAccess
);
720 state
->nKeys
= 1; /* always a one-column sort */
722 state
->comparetup
= comparetup_datum
;
723 state
->copytup
= copytup_datum
;
724 state
->writetup
= writetup_datum
;
725 state
->readtup
= readtup_datum
;
726 state
->reversedirection
= reversedirection_datum
;
728 state
->datumType
= datumType
;
730 /* lookup the ordering function */
731 if (!get_compare_function_for_ordering_op(sortOperator
,
732 &sortFunction
, &reverse
))
733 elog(ERROR
, "operator %u is not a valid ordering operator",
735 fmgr_info(sortFunction
, &state
->sortOpFn
);
737 /* set ordering flags */
738 state
->sortFnFlags
= reverse
? SK_BT_DESC
: 0;
740 state
->sortFnFlags
|= SK_BT_NULLS_FIRST
;
742 /* lookup necessary attributes of the datum type */
743 get_typlenbyval(datumType
, &typlen
, &typbyval
);
744 state
->datumTypeLen
= typlen
;
745 state
->datumTypeByVal
= typbyval
;
747 MemoryContextSwitchTo(oldcontext
);
/*
 * tuplesort_set_bound
 *
 *	Advise tuplesort that at most the first N result tuples are required.
 *
 * Must be called before inserting any tuples.  (Actually, we could allow it
 * as long as the sort hasn't spilled to disk, but there seems no need for
 * delayed calls at the moment.)
 *
 * This is a hint only.  The tuplesort may still return more tuples than
 * requested.
 */
765 tuplesort_set_bound(Tuplesortstate
*state
, int64 bound
)
767 /* Assert we're called before loading any tuples */
768 Assert(state
->status
== TSS_INITIAL
);
769 Assert(state
->memtupcount
== 0);
770 Assert(!state
->bounded
);
772 #ifdef DEBUG_BOUNDED_SORT
773 /* Honor GUC setting that disables the feature (for easy testing) */
774 if (!optimize_bounded_sort
)
778 /* We want to be able to compute bound * 2, so limit the setting */
779 if (bound
> (int64
) (INT_MAX
/ 2))
782 state
->bounded
= true;
783 state
->bound
= (int) bound
;
789 * Release resources and clean up.
791 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
792 * pointing to garbage. Be careful not to attempt to use or free such
793 * pointers afterwards!
796 tuplesort_end(Tuplesortstate
*state
)
798 /* context swap probably not needed, but let's be safe */
799 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
805 spaceUsed
= LogicalTapeSetBlocks(state
->tapeset
);
807 spaceUsed
= (state
->allowedMem
- state
->availMem
+ 1023) / 1024;
811 * Delete temporary "tape" files, if any.
813 * Note: want to include this in reported total cost of sort, hence need
814 * for two #ifdef TRACE_SORT sections.
817 LogicalTapeSetClose(state
->tapeset
);
823 elog(LOG
, "external sort ended, %ld disk blocks used: %s",
824 spaceUsed
, pg_rusage_show(&state
->ru_start
));
826 elog(LOG
, "internal sort ended, %ld KB used: %s",
827 spaceUsed
, pg_rusage_show(&state
->ru_start
));
831 TRACE_POSTGRESQL_SORT_DONE(state
->tapeset
,
832 (state
->tapeset
? LogicalTapeSetBlocks(state
->tapeset
) :
833 (state
->allowedMem
- state
->availMem
+ 1023) / 1024));
836 MemoryContextSwitchTo(oldcontext
);
839 * Free the per-sort memory context, thereby releasing all working memory,
840 * including the Tuplesortstate struct itself.
842 MemoryContextDelete(state
->sortcontext
);
846 * Grow the memtuples[] array, if possible within our memory constraint.
847 * Return TRUE if able to enlarge the array, FALSE if not.
849 * At each increment we double the size of the array. When we are short
850 * on memory we could consider smaller increases, but because availMem
851 * moves around with tuple addition/removal, this might result in thrashing.
852 * Small increases in the array size are likely to be pretty inefficient.
855 grow_memtuples(Tuplesortstate
*state
)
858 * We need to be sure that we do not cause LACKMEM to become true, else
859 * the space management algorithm will go nuts. We assume here that the
860 * memory chunk overhead associated with the memtuples array is constant
861 * and so there will be no unexpected addition to what we ask for. (The
862 * minimum array size established in tuplesort_begin_common is large
863 * enough to force palloc to treat it as a separate chunk, so this
864 * assumption should be good. But let's check it.)
866 if (state
->availMem
<= (long) (state
->memtupsize
* sizeof(SortTuple
)))
870 * On a 64-bit machine, allowedMem could be high enough to get us into
871 * trouble with MaxAllocSize, too.
873 if ((Size
) (state
->memtupsize
* 2) >= MaxAllocSize
/ sizeof(SortTuple
))
876 FREEMEM(state
, GetMemoryChunkSpace(state
->memtuples
));
877 state
->memtupsize
*= 2;
878 state
->memtuples
= (SortTuple
*)
879 repalloc(state
->memtuples
,
880 state
->memtupsize
* sizeof(SortTuple
));
881 USEMEM(state
, GetMemoryChunkSpace(state
->memtuples
));
883 elog(ERROR
, "unexpected out-of-memory situation during sort");
888 * Accept one tuple while collecting input data for sort.
890 * Note that the input data is always copied; the caller need not save it.
893 tuplesort_puttupleslot(Tuplesortstate
*state
, TupleTableSlot
*slot
)
895 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
899 * Copy the given tuple into memory we control, and decrease availMem.
900 * Then call the common code.
902 COPYTUP(state
, &stup
, (void *) slot
);
904 puttuple_common(state
, &stup
);
906 MemoryContextSwitchTo(oldcontext
);
910 * Accept one index tuple while collecting input data for sort.
912 * Note that the input tuple is always copied; the caller need not save it.
915 tuplesort_putindextuple(Tuplesortstate
*state
, IndexTuple tuple
)
917 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
921 * Copy the given tuple into memory we control, and decrease availMem.
922 * Then call the common code.
924 COPYTUP(state
, &stup
, (void *) tuple
);
926 puttuple_common(state
, &stup
);
928 MemoryContextSwitchTo(oldcontext
);
932 * Accept one Datum while collecting input data for sort.
934 * If the Datum is pass-by-ref type, the value will be copied.
937 tuplesort_putdatum(Tuplesortstate
*state
, Datum val
, bool isNull
)
939 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
943 * If it's a pass-by-reference value, copy it into memory we control, and
944 * decrease availMem. Then call the common code.
946 if (isNull
|| state
->datumTypeByVal
)
949 stup
.isnull1
= isNull
;
950 stup
.tuple
= NULL
; /* no separate storage */
954 stup
.datum1
= datumCopy(val
, false, state
->datumTypeLen
);
955 stup
.isnull1
= false;
956 stup
.tuple
= DatumGetPointer(stup
.datum1
);
957 USEMEM(state
, GetMemoryChunkSpace(stup
.tuple
));
960 puttuple_common(state
, &stup
);
962 MemoryContextSwitchTo(oldcontext
);
966 * Shared code for tuple and datum cases.
969 puttuple_common(Tuplesortstate
*state
, SortTuple
*tuple
)
971 switch (state
->status
)
976 * Save the tuple into the unsorted array. First, grow the array
977 * as needed. Note that we try to grow the array when there is
978 * still one free slot remaining --- if we fail, there'll still be
979 * room to store the incoming tuple, and then we'll switch to
980 * tape-based operation.
982 if (state
->memtupcount
>= state
->memtupsize
- 1)
984 (void) grow_memtuples(state
);
985 Assert(state
->memtupcount
< state
->memtupsize
);
987 state
->memtuples
[state
->memtupcount
++] = *tuple
;
990 * Check if it's time to switch over to a bounded heapsort. We do
991 * so if the input tuple count exceeds twice the desired tuple
992 * count (this is a heuristic for where heapsort becomes cheaper
993 * than a quicksort), or if we've just filled workMem and have
994 * enough tuples to meet the bound.
996 * Note that once we enter TSS_BOUNDED state we will always try to
997 * complete the sort that way. In the worst case, if later input
998 * tuples are larger than earlier ones, this might cause us to
999 * exceed workMem significantly.
1001 if (state
->bounded
&&
1002 (state
->memtupcount
> state
->bound
* 2 ||
1003 (state
->memtupcount
> state
->bound
&& LACKMEM(state
))))
1007 elog(LOG
, "switching to bounded heapsort at %d tuples: %s",
1009 pg_rusage_show(&state
->ru_start
));
1011 make_bounded_heap(state
);
1016 * Done if we still fit in available memory and have array slots.
1018 if (state
->memtupcount
< state
->memtupsize
&& !LACKMEM(state
))
1022 * Nope; time to switch to tape-based operation.
1027 * Dump tuples until we are back under the limit.
1029 dumptuples(state
, false);
1035 * We don't want to grow the array here, so check whether the new
1036 * tuple can be discarded before putting it in. This should be a
1037 * good speed optimization, too, since when there are many more
1038 * input tuples than the bound, most input tuples can be discarded
1039 * with just this one comparison. Note that because we currently
1040 * have the sort direction reversed, we must check for <= not >=.
1042 if (COMPARETUP(state
, tuple
, &state
->memtuples
[0]) <= 0)
1044 /* new tuple <= top of the heap, so we can discard it */
1045 free_sort_tuple(state
, tuple
);
1049 /* discard top of heap, sift up, insert new tuple */
1050 free_sort_tuple(state
, &state
->memtuples
[0]);
1051 tuplesort_heap_siftup(state
, false);
1052 tuplesort_heap_insert(state
, tuple
, 0, false);
1059 * Insert the tuple into the heap, with run number currentRun if
1060 * it can go into the current run, else run number currentRun+1.
1061 * The tuple can go into the current run if it is >= the first
1062 * not-yet-output tuple. (Actually, it could go into the current
1063 * run if it is >= the most recently output tuple ... but that
1064 * would require keeping around the tuple we last output, and it's
1065 * simplest to let writetup free each tuple as soon as it's
1068 * Note there will always be at least one tuple in the heap at
1069 * this point; see dumptuples.
1071 Assert(state
->memtupcount
> 0);
1072 if (COMPARETUP(state
, tuple
, &state
->memtuples
[0]) >= 0)
1073 tuplesort_heap_insert(state
, tuple
, state
->currentRun
, true);
1075 tuplesort_heap_insert(state
, tuple
, state
->currentRun
+ 1, true);
1078 * If we are over the memory limit, dump tuples till we're under.
1080 dumptuples(state
, false);
1084 elog(ERROR
, "invalid tuplesort state");
1090 * All tuples have been provided; finish the sort.
1093 tuplesort_performsort(Tuplesortstate
*state
)
1095 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1099 elog(LOG
, "performsort starting: %s",
1100 pg_rusage_show(&state
->ru_start
));
1103 switch (state
->status
)
1108 * We were able to accumulate all the tuples within the allowed
1109 * amount of memory. Just qsort 'em and we're done.
1111 if (state
->memtupcount
> 1)
1112 qsort_arg((void *) state
->memtuples
,
1115 (qsort_arg_comparator
) state
->comparetup
,
1118 state
->eof_reached
= false;
1119 state
->markpos_offset
= 0;
1120 state
->markpos_eof
= false;
1121 state
->status
= TSS_SORTEDINMEM
;
1127 * We were able to accumulate all the tuples required for output
1128 * in memory, using a heap to eliminate excess tuples. Now we
1129 * have to transform the heap to a properly-sorted array.
1131 sort_bounded_heap(state
);
1133 state
->eof_reached
= false;
1134 state
->markpos_offset
= 0;
1135 state
->markpos_eof
= false;
1136 state
->status
= TSS_SORTEDINMEM
;
1142 * Finish tape-based sort. First, flush all tuples remaining in
1143 * memory out to tape; then merge until we have a single remaining
1144 * run (or, if !randomAccess, one run per tape). Note that
1145 * mergeruns sets the correct state->status.
1147 dumptuples(state
, true);
1149 state
->eof_reached
= false;
1150 state
->markpos_block
= 0L;
1151 state
->markpos_offset
= 0;
1152 state
->markpos_eof
= false;
1156 elog(ERROR
, "invalid tuplesort state");
1163 if (state
->status
== TSS_FINALMERGE
)
1164 elog(LOG
, "performsort done (except %d-way final merge): %s",
1166 pg_rusage_show(&state
->ru_start
));
1168 elog(LOG
, "performsort done: %s",
1169 pg_rusage_show(&state
->ru_start
));
1173 MemoryContextSwitchTo(oldcontext
);
1177 * Internal routine to fetch the next tuple in either forward or back
1178 * direction into *stup. Returns FALSE if no more tuples.
1179 * If *should_free is set, the caller must pfree stup.tuple when done with it.
1182 tuplesort_gettuple_common(Tuplesortstate
*state
, bool forward
,
1183 SortTuple
*stup
, bool *should_free
)
1185 unsigned int tuplen
;
1187 switch (state
->status
)
1189 case TSS_SORTEDINMEM
:
1190 Assert(forward
|| state
->randomAccess
);
1191 *should_free
= false;
1194 if (state
->current
< state
->memtupcount
)
1196 *stup
= state
->memtuples
[state
->current
++];
1199 state
->eof_reached
= true;
1202 * Complain if caller tries to retrieve more tuples than
1203 * originally asked for in a bounded sort. This is because
1204 * returning EOF here might be the wrong thing.
1206 if (state
->bounded
&& state
->current
>= state
->bound
)
1207 elog(ERROR
, "retrieved too many tuples in a bounded sort");
1213 if (state
->current
<= 0)
* If all tuples have already been fetched, return the last tuple;
* otherwise return the tuple before the last one returned.
1220 if (state
->eof_reached
)
1221 state
->eof_reached
= false;
1224 state
->current
--; /* last returned tuple */
1225 if (state
->current
<= 0)
1228 *stup
= state
->memtuples
[state
->current
- 1];
1233 case TSS_SORTEDONTAPE
:
1234 Assert(forward
|| state
->randomAccess
);
1235 *should_free
= true;
1238 if (state
->eof_reached
)
1240 if ((tuplen
= getlen(state
, state
->result_tape
, true)) != 0)
1242 READTUP(state
, stup
, state
->result_tape
, tuplen
);
1247 state
->eof_reached
= true;
* If all tuples have already been fetched, return the last tuple;
* otherwise return the tuple before the last one returned.
1258 if (state
->eof_reached
)
1261 * Seek position is pointing just past the zero tuplen at the
1262 * end of file; back up to fetch last tuple's ending length
1263 * word. If seek fails we must have a completely empty file.
1265 if (!LogicalTapeBackspace(state
->tapeset
,
1267 2 * sizeof(unsigned int)))
1269 state
->eof_reached
= false;
1274 * Back up and fetch previously-returned tuple's ending length
1275 * word. If seek fails, assume we are at start of file.
1277 if (!LogicalTapeBackspace(state
->tapeset
,
1279 sizeof(unsigned int)))
1281 tuplen
= getlen(state
, state
->result_tape
, false);
1284 * Back up to get ending length word of tuple before it.
1286 if (!LogicalTapeBackspace(state
->tapeset
,
1288 tuplen
+ 2 * sizeof(unsigned int)))
1291 * If that fails, presumably the prev tuple is the first
1292 * in the file. Back up so that it becomes next to read
1293 * in forward direction (not obviously right, but that is
1294 * what in-memory case does).
1296 if (!LogicalTapeBackspace(state
->tapeset
,
1298 tuplen
+ sizeof(unsigned int)))
1299 elog(ERROR
, "bogus tuple length in backward scan");
1304 tuplen
= getlen(state
, state
->result_tape
, false);
1307 * Now we have the length of the prior tuple, back up and read it.
1308 * Note: READTUP expects we are positioned after the initial
1309 * length word of the tuple, so back up to that point.
1311 if (!LogicalTapeBackspace(state
->tapeset
,
1314 elog(ERROR
, "bogus tuple length in backward scan");
1315 READTUP(state
, stup
, state
->result_tape
, tuplen
);
1318 case TSS_FINALMERGE
:
1320 *should_free
= true;
1323 * This code should match the inner loop of mergeonerun().
1325 if (state
->memtupcount
> 0)
1327 int srcTape
= state
->memtuples
[0].tupindex
;
1332 *stup
= state
->memtuples
[0];
1333 /* returned tuple is no longer counted in our memory space */
1336 tuplen
= GetMemoryChunkSpace(stup
->tuple
);
1337 state
->availMem
+= tuplen
;
1338 state
->mergeavailmem
[srcTape
] += tuplen
;
1340 tuplesort_heap_siftup(state
, false);
1341 if ((tupIndex
= state
->mergenext
[srcTape
]) == 0)
1344 * out of preloaded data on this tape, try to read more
1346 * Unlike mergeonerun(), we only preload from the single
1347 * tape that's run dry. See mergepreread() comments.
1349 mergeprereadone(state
, srcTape
);
1352 * if still no data, we've reached end of run on this tape
1354 if ((tupIndex
= state
->mergenext
[srcTape
]) == 0)
1357 /* pull next preread tuple from list, insert in heap */
1358 newtup
= &state
->memtuples
[tupIndex
];
1359 state
->mergenext
[srcTape
] = newtup
->tupindex
;
1360 if (state
->mergenext
[srcTape
] == 0)
1361 state
->mergelast
[srcTape
] = 0;
1362 tuplesort_heap_insert(state
, newtup
, srcTape
, false);
1363 /* put the now-unused memtuples entry on the freelist */
1364 newtup
->tupindex
= state
->mergefreelist
;
1365 state
->mergefreelist
= tupIndex
;
1366 state
->mergeavailslots
[srcTape
]++;
1372 elog(ERROR
, "invalid tuplesort state");
1373 return false; /* keep compiler quiet */
1378 * Fetch the next tuple in either forward or back direction.
1379 * If successful, put tuple in slot and return TRUE; else, clear the slot
1383 tuplesort_gettupleslot(Tuplesortstate
*state
, bool forward
,
1384 TupleTableSlot
*slot
)
1386 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1390 if (!tuplesort_gettuple_common(state
, forward
, &stup
, &should_free
))
1393 MemoryContextSwitchTo(oldcontext
);
1397 ExecStoreMinimalTuple((MinimalTuple
) stup
.tuple
, slot
, should_free
);
1402 ExecClearTuple(slot
);
1408 * Fetch the next index tuple in either forward or back direction.
1409 * Returns NULL if no more tuples. If *should_free is set, the
1410 * caller must pfree the returned tuple when done with it.
1413 tuplesort_getindextuple(Tuplesortstate
*state
, bool forward
,
1416 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1419 if (!tuplesort_gettuple_common(state
, forward
, &stup
, should_free
))
1422 MemoryContextSwitchTo(oldcontext
);
1424 return (IndexTuple
) stup
.tuple
;
1428 * Fetch the next Datum in either forward or back direction.
1429 * Returns FALSE if no more datums.
1431 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1432 * and is now owned by the caller.
1435 tuplesort_getdatum(Tuplesortstate
*state
, bool forward
,
1436 Datum
*val
, bool *isNull
)
1438 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1442 if (!tuplesort_gettuple_common(state
, forward
, &stup
, &should_free
))
1444 MemoryContextSwitchTo(oldcontext
);
1448 if (stup
.isnull1
|| state
->datumTypeByVal
)
1451 *isNull
= stup
.isnull1
;
1458 *val
= datumCopy(stup
.datum1
, false, state
->datumTypeLen
);
1462 MemoryContextSwitchTo(oldcontext
);
/*
 * tuplesort_merge_order - report merge order we'll use for given memory
 * (note: "merge order" just means the number of input tapes in the merge).
 *
 * This is exported for use by the planner.  allowedMem is in bytes.
 */
int
tuplesort_merge_order(long allowedMem)
{
	int			mOrder;

	/*
	 * We need one tape for each merge input, plus another one for the output,
	 * and each of these tapes needs buffer space.  In addition we want
	 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
	 * need so much).
	 *
	 * Note: you might be thinking we need to account for the memtuples[]
	 * array in this calculation, but we effectively treat that as part of the
	 * MERGE_BUFFER_SIZE workspace.
	 */
	mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
		(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

	/* Even in minimum memory, use at least a MINORDER merge */
	mOrder = Max(mOrder, MINORDER);

	return mOrder;
}
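
/*
 * A worked example (illustrative; assumes the default BLCKSZ of 8192):
 * with allowedMem = 1MB = 1048576 bytes, TAPE_BUFFER_OVERHEAD is 24576 and
 * MERGE_BUFFER_SIZE is 262144, so
 *
 *		mOrder = (1048576 - 24576) / (262144 + 24576) = 3
 *
 * which the MINORDER clamp then raises to 6.  Larger allowedMem yields
 * proportionally more input tapes.
 */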
1498 * inittapes - initialize for tape sorting.
1500 * This is called only if we have found we don't have room to sort in memory.
1503 inittapes(Tuplesortstate
*state
)
1510 /* Compute number of tapes to use: merge order plus 1 */
1511 maxTapes
= tuplesort_merge_order(state
->allowedMem
) + 1;
1514 * We must have at least 2*maxTapes slots in the memtuples[] array, else
1515 * we'd not have room for merge heap plus preread. It seems unlikely that
1516 * this case would ever occur, but be safe.
1518 maxTapes
= Min(maxTapes
, state
->memtupsize
/ 2);
1520 state
->maxTapes
= maxTapes
;
1521 state
->tapeRange
= maxTapes
- 1;
1525 elog(LOG
, "switching to external sort with %d tapes: %s",
1526 maxTapes
, pg_rusage_show(&state
->ru_start
));
1530 * Decrease availMem to reflect the space needed for tape buffers; but
1531 * don't decrease it to the point that we have no room for tuples. (That
1532 * case is only likely to occur if sorting pass-by-value Datums; in all
1533 * other scenarios the memtuples[] array is unlikely to occupy more than
1534 * half of allowedMem. In the pass-by-value case it's not important to
1535 * account for tuple space, so we don't care if LACKMEM becomes
1538 tapeSpace
= maxTapes
* TAPE_BUFFER_OVERHEAD
;
1539 if (tapeSpace
+ GetMemoryChunkSpace(state
->memtuples
) < state
->allowedMem
)
1540 USEMEM(state
, tapeSpace
);
1543 * Make sure that the temp file(s) underlying the tape set are created in
1544 * suitable temp tablespaces.
1546 PrepareTempTablespaces();
1549 * Create the tape set and allocate the per-tape data arrays.
1551 state
->tapeset
= LogicalTapeSetCreate(maxTapes
);
1553 state
->mergeactive
= (bool *) palloc0(maxTapes
* sizeof(bool));
1554 state
->mergenext
= (int *) palloc0(maxTapes
* sizeof(int));
1555 state
->mergelast
= (int *) palloc0(maxTapes
* sizeof(int));
1556 state
->mergeavailslots
= (int *) palloc0(maxTapes
* sizeof(int));
1557 state
->mergeavailmem
= (long *) palloc0(maxTapes
* sizeof(long));
1558 state
->tp_fib
= (int *) palloc0(maxTapes
* sizeof(int));
1559 state
->tp_runs
= (int *) palloc0(maxTapes
* sizeof(int));
1560 state
->tp_dummy
= (int *) palloc0(maxTapes
* sizeof(int));
1561 state
->tp_tapenum
= (int *) palloc0(maxTapes
* sizeof(int));
1564 * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
1565 * marked as belonging to run number zero.
1567 * NOTE: we pass false for checkIndex since there's no point in comparing
1568 * indexes in this step, even though we do intend the indexes to be part
1569 * of the sort key...
1571 ntuples
= state
->memtupcount
;
1572 state
->memtupcount
= 0; /* make the heap empty */
1573 for (j
= 0; j
< ntuples
; j
++)
1575 /* Must copy source tuple to avoid possible overwrite */
1576 SortTuple stup
= state
->memtuples
[j
];
1578 tuplesort_heap_insert(state
, &stup
, 0, false);
1580 Assert(state
->memtupcount
== ntuples
);
1582 state
->currentRun
= 0;
1585 * Initialize variables of Algorithm D (step D1).
1587 for (j
= 0; j
< maxTapes
; j
++)
1589 state
->tp_fib
[j
] = 1;
1590 state
->tp_runs
[j
] = 0;
1591 state
->tp_dummy
[j
] = 1;
1592 state
->tp_tapenum
[j
] = j
;
1594 state
->tp_fib
[state
->tapeRange
] = 0;
1595 state
->tp_dummy
[state
->tapeRange
] = 0;
1598 state
->destTape
= 0;
1600 state
->status
= TSS_BUILDRUNS
;
1604 * selectnewtape -- select new tape for new initial run.
1606 * This is called after finishing a run when we know another run
1607 * must be started. This implements steps D3, D4 of Algorithm D.
1610 selectnewtape(Tuplesortstate
*state
)
1615 /* Step D3: advance j (destTape) */
1616 if (state
->tp_dummy
[state
->destTape
] < state
->tp_dummy
[state
->destTape
+ 1])
1621 if (state
->tp_dummy
[state
->destTape
] != 0)
1623 state
->destTape
= 0;
1627 /* Step D4: increase level */
1629 a
= state
->tp_fib
[0];
1630 for (j
= 0; j
< state
->tapeRange
; j
++)
1632 state
->tp_dummy
[j
] = a
+ state
->tp_fib
[j
+ 1] - state
->tp_fib
[j
];
1633 state
->tp_fib
[j
] = a
+ state
->tp_fib
[j
+ 1];
1635 state
->destTape
= 0;
1639 * mergeruns -- merge all the completed initial runs.
1641 * This implements steps D5, D6 of Algorithm D. All input data has
1642 * already been written to initial runs on tape (see dumptuples).
1645 mergeruns(Tuplesortstate
*state
)
1652 Assert(state
->status
== TSS_BUILDRUNS
);
1653 Assert(state
->memtupcount
== 0);
1656 * If we produced only one initial run (quite likely if the total data
1657 * volume is between 1X and 2X workMem), we can just use that tape as the
1658 * finished output, rather than doing a useless merge. (This obvious
1659 * optimization is not in Knuth's algorithm.)
1661 if (state
->currentRun
== 1)
1663 state
->result_tape
= state
->tp_tapenum
[state
->destTape
];
1664 /* must freeze and rewind the finished output tape */
1665 LogicalTapeFreeze(state
->tapeset
, state
->result_tape
);
1666 state
->status
= TSS_SORTEDONTAPE
;
1670 /* End of step D2: rewind all output tapes to prepare for merging */
1671 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1672 LogicalTapeRewind(state
->tapeset
, tapenum
, false);
1677 * At this point we know that tape[T] is empty. If there's just one
1678 * (real or dummy) run left on each input tape, then only one merge
1679 * pass remains. If we don't have to produce a materialized sorted
1680 * tape, we can stop at this point and do the final merge on-the-fly.
1682 if (!state
->randomAccess
)
1684 bool allOneRun
= true;
1686 Assert(state
->tp_runs
[state
->tapeRange
] == 0);
1687 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1689 if (state
->tp_runs
[tapenum
] + state
->tp_dummy
[tapenum
] != 1)
1697 /* Tell logtape.c we won't be writing anymore */
1698 LogicalTapeSetForgetFreeSpace(state
->tapeset
);
1699 /* Initialize for the final merge pass */
1701 state
->status
= TSS_FINALMERGE
;
1706 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
1707 while (state
->tp_runs
[state
->tapeRange
- 1] ||
1708 state
->tp_dummy
[state
->tapeRange
- 1])
1710 bool allDummy
= true;
1712 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1714 if (state
->tp_dummy
[tapenum
] == 0)
1723 state
->tp_dummy
[state
->tapeRange
]++;
1724 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1725 state
->tp_dummy
[tapenum
]--;
1731 /* Step D6: decrease level */
1732 if (--state
->Level
== 0)
1734 /* rewind output tape T to use as new input */
1735 LogicalTapeRewind(state
->tapeset
, state
->tp_tapenum
[state
->tapeRange
],
1737 /* rewind used-up input tape P, and prepare it for write pass */
1738 LogicalTapeRewind(state
->tapeset
, state
->tp_tapenum
[state
->tapeRange
- 1],
1740 state
->tp_runs
[state
->tapeRange
- 1] = 0;
1743 * reassign tape units per step D6; note we no longer care about A[]
1745 svTape
= state
->tp_tapenum
[state
->tapeRange
];
1746 svDummy
= state
->tp_dummy
[state
->tapeRange
];
1747 svRuns
= state
->tp_runs
[state
->tapeRange
];
1748 for (tapenum
= state
->tapeRange
; tapenum
> 0; tapenum
--)
1750 state
->tp_tapenum
[tapenum
] = state
->tp_tapenum
[tapenum
- 1];
1751 state
->tp_dummy
[tapenum
] = state
->tp_dummy
[tapenum
- 1];
1752 state
->tp_runs
[tapenum
] = state
->tp_runs
[tapenum
- 1];
1754 state
->tp_tapenum
[0] = svTape
;
1755 state
->tp_dummy
[0] = svDummy
;
1756 state
->tp_runs
[0] = svRuns
;
1760 * Done. Knuth says that the result is on TAPE[1], but since we exited
1761 * the loop without performing the last iteration of step D6, we have not
1762 * rearranged the tape unit assignment, and therefore the result is on
1763 * TAPE[T]. We need to do it this way so that we can freeze the final
1764 * output tape while rewinding it. The last iteration of step D6 would be
1765 * a waste of cycles anyway...
1767 state
->result_tape
= state
->tp_tapenum
[state
->tapeRange
];
1768 LogicalTapeFreeze(state
->tapeset
, state
->result_tape
);
1769 state
->status
= TSS_SORTEDONTAPE
;
1773 * Merge one run from each input tape, except ones with dummy runs.
1775 * This is the inner loop of Algorithm D step D5. We know that the
1776 * output tape is TAPE[T].
1779 mergeonerun(Tuplesortstate
*state
)
1781 int destTape
= state
->tp_tapenum
[state
->tapeRange
];
1789 * Start the merge by loading one tuple from each active source tape into
1790 * the heap. We can also decrease the input run/dummy run counts.
1795 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
1796 * out, and replacing it with next tuple from same tape (if there is
    while (state->memtupcount > 0)
    {
        /* write the tuple to destTape */
        priorAvail = state->availMem;
        srcTape = state->memtuples[0].tupindex;
        WRITETUP(state, destTape, &state->memtuples[0]);
        /* writetup adjusted total free space, now fix per-tape space */
        spaceFreed = state->availMem - priorAvail;
        state->mergeavailmem[srcTape] += spaceFreed;
        /* compact the heap */
        tuplesort_heap_siftup(state, false);
        if ((tupIndex = state->mergenext[srcTape]) == 0)
        {
            /* out of preloaded data on this tape, try to read more */
            mergepreread(state);
            /* if still no data, we've reached end of run on this tape */
            if ((tupIndex = state->mergenext[srcTape]) == 0)
                continue;
        }
        /* pull next preread tuple from list, insert in heap */
        tup = &state->memtuples[tupIndex];
        state->mergenext[srcTape] = tup->tupindex;
        if (state->mergenext[srcTape] == 0)
            state->mergelast[srcTape] = 0;
        tuplesort_heap_insert(state, tup, srcTape, false);
        /* put the now-unused memtuples entry on the freelist */
        tup->tupindex = state->mergefreelist;
        state->mergefreelist = tupIndex;
        state->mergeavailslots[srcTape]++;
    }
    /*
     * When the heap empties, we're done. Write an end-of-run marker on the
     * output tape, and increment its count of real runs.
     */
    markrunend(state, destTape);
    state->tp_runs[state->tapeRange]++;

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
             pg_rusage_show(&state->ru_start));
#endif
}
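/*
 * Illustrative aside, not part of this module: the per-step discipline in
 * mergeonerun() is "emit the smallest frontmost tuple, then refill only the
 * source it came from". The sketch below shows the same discipline for
 * merging k sorted int arrays, using a linear scan where the real code uses
 * the SortTuple heap and logical tapes. The function name and parameters
 * (toy_merge, src, len, pos, dst) are hypothetical and exist only for this
 * example; the caller is assumed to zero-initialize pos[].
 */
#if 0
#include <stddef.h>

static size_t
toy_merge(int **src, const size_t *len, size_t *pos, size_t k, int *dst)
{
    size_t      nout = 0;

    for (;;)
    {
        size_t      best = k;   /* k means "no source has data left" */
        size_t      i;

        /* find the source whose frontmost element is smallest */
        for (i = 0; i < k; i++)
        {
            if (pos[i] < len[i] &&
                (best == k || src[i][pos[i]] < src[best][pos[best]]))
                best = i;
        }
        if (best == k)
            break;              /* every source exhausted: merge complete */
        dst[nout++] = src[best][pos[best]++];   /* advance that source only */
    }
    return nout;
}
#endif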
/*
 * beginmerge - initialize for a merge pass
 *
 * We decrease the counts of real and dummy runs for each tape, and mark
 * which tapes contain active input runs in mergeactive[]. Then, load
 * as many tuples as we can from each active input tape, and finally
 * fill the merge heap with the first tuple from each active tape.
 */
static void
beginmerge(Tuplesortstate *state)
{
    int         activeTapes;
    int         tapenum;
    int         srcTape;
    int         slotsPerTape;
    long        spacePerTape;

    /* Heap should be empty here */
    Assert(state->memtupcount == 0);
    /* Adjust run counts and mark the active tapes */
    memset(state->mergeactive, 0,
           state->maxTapes * sizeof(*state->mergeactive));
    activeTapes = 0;
    for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
    {
        if (state->tp_dummy[tapenum] > 0)
            state->tp_dummy[tapenum]--;
        else
        {
            Assert(state->tp_runs[tapenum] > 0);
            state->tp_runs[tapenum]--;
            srcTape = state->tp_tapenum[tapenum];
            state->mergeactive[srcTape] = true;
            activeTapes++;
        }
    }
    state->activeTapes = activeTapes;
    /* Clear merge-pass state variables */
    memset(state->mergenext, 0,
           state->maxTapes * sizeof(*state->mergenext));
    memset(state->mergelast, 0,
           state->maxTapes * sizeof(*state->mergelast));
    state->mergefreelist = 0;   /* nothing in the freelist */
    state->mergefirstfree = activeTapes;    /* 1st slot avail for preread */
    /*
     * Initialize space allocation to let each active input tape have an equal
     * share of preread space.
     */
    Assert(activeTapes > 0);
    slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
    Assert(slotsPerTape > 0);
    spacePerTape = state->availMem / activeTapes;
    for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
    {
        if (state->mergeactive[srcTape])
        {
            state->mergeavailslots[srcTape] = slotsPerTape;
            state->mergeavailmem[srcTape] = spacePerTape;
        }
    }

    /*
     * Preread as many tuples as possible (and at least one) from each active
     * tape
     */
    mergepreread(state);
    /* Load the merge heap with the first tuple from each input tape */
    for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
    {
        int         tupIndex = state->mergenext[srcTape];
        SortTuple  *tup;

        if (tupIndex == 0)
            continue;
        tup = &state->memtuples[tupIndex];
        state->mergenext[srcTape] = tup->tupindex;
        if (state->mergenext[srcTape] == 0)
            state->mergelast[srcTape] = 0;
        tuplesort_heap_insert(state, tup, srcTape, false);
        /* put the now-unused memtuples entry on the freelist */
        tup->tupindex = state->mergefreelist;
        state->mergefreelist = tupIndex;
        state->mergeavailslots[srcTape]++;
    }
}
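/*
 * Worked example with illustrative numbers only: suppose memtupsize = 1024
 * array slots, activeTapes = 6 (so mergefirstfree = 6, leaving slots 0..5
 * for the merge heap itself), and availMem = 6291456 bytes. Then each
 * active tape gets slotsPerTape = (1024 - 6) / 6 = 169 preread slots and
 * spacePerTape = 6291456 / 6 = 1048576 bytes of preread space. The few
 * slots/bytes lost to integer division simply go unused for this pass.
 */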
/*
 * mergepreread - load tuples from merge input tapes
 *
 * This routine exists to improve sequentiality of reads during a merge pass,
 * as explained in the header comments of this file. Load tuples from each
 * active source tape until the tape's run is exhausted or it has used up
 * its fair share of available memory. In any case, we guarantee that there
 * is at least one preread tuple available from each unexhausted input tape.
 *
 * We invoke this routine at the start of a merge pass for initial load,
 * and then whenever any tape's preread data runs out. Note that we load
 * as much data as possible from all tapes, not just the one that ran out.
 * This is because logtape.c works best with a usage pattern that alternates
 * between reading a lot of data and writing a lot of data, so whenever we
 * are forced to read, we should fill working memory completely.
 *
 * In FINALMERGE state, we *don't* use this routine, but instead just preread
 * from the single tape that ran dry. There's no read/write alternation in
 * that state and so no point in scanning through all the tapes to fix one.
 * (Moreover, there may be quite a lot of inactive tapes in that state, since
 * we might have had many fewer runs than tapes. In a regular tape-to-tape
 * merge we can expect most of the tapes to be active.)
 */
static void
mergepreread(Tuplesortstate *state)
{
    int         srcTape;

    for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
        mergeprereadone(state, srcTape);
}
/*
 * mergeprereadone - load tuples from one merge input tape
 *
 * Read tuples from the specified tape until it has used up its free memory
 * or array slots; but ensure that we have at least one tuple, if any are
 * to be had.
 */
static void
mergeprereadone(Tuplesortstate *state, int srcTape)
{
    unsigned int tuplen;
    SortTuple   stup;
    int         tupIndex;
    long        priorAvail,
                spaceUsed;

    if (!state->mergeactive[srcTape])
        return;                 /* tape's run is already exhausted */
    priorAvail = state->availMem;
    state->availMem = state->mergeavailmem[srcTape];
    while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
           state->mergenext[srcTape] == 0)
    {
        /* read next tuple, if any */
        if ((tuplen = getlen(state, srcTape, true)) == 0)
        {
            state->mergeactive[srcTape] = false;
            break;
        }
        READTUP(state, &stup, srcTape, tuplen);
        /* find a free slot in memtuples[] for it */
        tupIndex = state->mergefreelist;
        if (tupIndex)
            state->mergefreelist = state->memtuples[tupIndex].tupindex;
        else
        {
            tupIndex = state->mergefirstfree++;
            Assert(tupIndex < state->memtupsize);
        }
        state->mergeavailslots[srcTape]--;
        /* store tuple, append to list for its tape */
        stup.tupindex = 0;
        state->memtuples[tupIndex] = stup;
        if (state->mergelast[srcTape])
            state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
        else
            state->mergenext[srcTape] = tupIndex;
        state->mergelast[srcTape] = tupIndex;
    }
    /* update per-tape and global availmem counts */
    spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
    state->mergeavailmem[srcTape] = state->availMem;
    state->availMem = priorAvail - spaceUsed;
}
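/*
 * Illustrative aside, not part of this module: the preread bookkeeping above
 * threads singly-linked lists through the memtuples[] array itself, using the
 * tupindex field as a "next slot" index, with mergenext[]/mergelast[] as
 * per-tape head/tail pointers and mergefreelist as a recycling list. Index 0
 * serves as the list terminator, which is safe because preread slots are
 * handed out starting at mergefirstfree, never slot 0. The self-contained
 * sketch below shows the same technique with plain ints; all names in it
 * are hypothetical.
 */
#if 0
#include <stdio.h>

struct slot
{
    int         value;
    int         next;           /* index of next slot, or 0 for end of list */
};

int
main(void)
{
    struct slot pool[8] = {{0, 0}};
    int         head = 0;       /* 0 means "empty list" */
    int         tail = 0;
    int         firstfree = 1;  /* slot 0 is never handed out */
    int         i;

    /* append three values, linking them through the array */
    for (i = 0; i < 3; i++)
    {
        int         idx = firstfree++;

        pool[idx].value = (i + 1) * 10;
        pool[idx].next = 0;
        if (tail)
            pool[tail].next = idx;
        else
            head = idx;
        tail = idx;
    }

    /* walk the list: prints 10, 20, 30 */
    for (i = head; i != 0; i = pool[i].next)
        printf("%d\n", pool[i].value);
    return 0;
}
#endif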
/*
 * dumptuples - remove tuples from heap and write to tape
 *
 * This is used during initial-run building, but not during merging.
 *
 * When alltuples = false, dump only enough tuples to get under the
 * availMem limit (and leave at least one tuple in the heap in any case,
 * since puttuple assumes it always has a tuple to compare to). We also
 * insist there be at least one free slot in the memtuples[] array.
 *
 * When alltuples = true, dump everything currently in memory.
 * (This case is only used at end of input data.)
 *
 * If we empty the heap, close out the current run and return (this should
 * only happen at end of input data). If we see that the tuple run number
 * at the top of the heap has changed, start a new run.
 */
static void
dumptuples(Tuplesortstate *state, bool alltuples)
{
    while (alltuples ||
           (LACKMEM(state) && state->memtupcount > 1) ||
           state->memtupcount >= state->memtupsize)
    {
        /*
         * Dump the heap's frontmost entry, and sift up to remove it from the
         * heap.
         */
        Assert(state->memtupcount > 0);
        WRITETUP(state, state->tp_tapenum[state->destTape],
                 &state->memtuples[0]);
        tuplesort_heap_siftup(state, true);

        /*
         * If the heap is empty *or* top run number has changed, we've
         * finished the current run.
         */
        if (state->memtupcount == 0 ||
            state->currentRun != state->memtuples[0].tupindex)
        {
            markrunend(state, state->tp_tapenum[state->destTape]);
            state->currentRun++;
            state->tp_runs[state->destTape]++;
            state->tp_dummy[state->destTape]--; /* per Alg D step D2 */

#ifdef TRACE_SORT
            if (trace_sort)
                elog(LOG, "finished writing%s run %d to tape %d: %s",
                     (state->memtupcount == 0) ? " final" : "",
                     state->currentRun, state->destTape,
                     pg_rusage_show(&state->ru_start));
#endif

            /*
             * Done if heap is empty, else prepare for new run.
             */
            if (state->memtupcount == 0)
                break;
            Assert(state->currentRun == state->memtuples[0].tupindex);
            selectnewtape(state);
        }
    }
}
/*
 * tuplesort_rescan - rewind and replay the scan
 */
void
tuplesort_rescan(Tuplesortstate *state)
{
    MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

    Assert(state->randomAccess);

    switch (state->status)
    {
        case TSS_SORTEDINMEM:
            state->current = 0;
            state->eof_reached = false;
            state->markpos_offset = 0;
            state->markpos_eof = false;
            break;
        case TSS_SORTEDONTAPE:
            LogicalTapeRewind(state->tapeset,
                              state->result_tape,
                              false);
            state->eof_reached = false;
            state->markpos_block = 0L;
            state->markpos_offset = 0;
            state->markpos_eof = false;
            break;
        default:
            elog(ERROR, "invalid tuplesort state");
            break;
    }

    MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_markpos - saves current position in the merged sort file
 */
void
tuplesort_markpos(Tuplesortstate *state)
{
    MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

    Assert(state->randomAccess);

    switch (state->status)
    {
        case TSS_SORTEDINMEM:
            state->markpos_offset = state->current;
            state->markpos_eof = state->eof_reached;
            break;
        case TSS_SORTEDONTAPE:
            LogicalTapeTell(state->tapeset,
                            state->result_tape,
                            &state->markpos_block,
                            &state->markpos_offset);
            state->markpos_eof = state->eof_reached;
            break;
        default:
            elog(ERROR, "invalid tuplesort state");
            break;
    }

    MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_restorepos - restores current position in merged sort file to
 *                        last saved position
 */
void
tuplesort_restorepos(Tuplesortstate *state)
{
    MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

    Assert(state->randomAccess);

    switch (state->status)
    {
        case TSS_SORTEDINMEM:
            state->current = state->markpos_offset;
            state->eof_reached = state->markpos_eof;
            break;
        case TSS_SORTEDONTAPE:
            if (!LogicalTapeSeek(state->tapeset,
                                 state->result_tape,
                                 state->markpos_block,
                                 state->markpos_offset))
                elog(ERROR, "tuplesort_restorepos failed");
            state->eof_reached = state->markpos_eof;
            break;
        default:
            elog(ERROR, "invalid tuplesort state");
            break;
    }

    MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_explain - produce a line of information for EXPLAIN ANALYZE
 *
 * This can be called after tuplesort_performsort() finishes to obtain
 * printable summary information about how the sort was performed.
 *
 * The result is a palloc'd string.
 */
char *
tuplesort_explain(Tuplesortstate *state)
{
    char       *result = (char *) palloc(100);
    long        spaceUsed;

    /*
     * Note: it might seem we should print both memory and disk usage for a
     * disk-based sort. However, the current code doesn't track memory space
     * accurately once we have begun to return tuples to the caller (since we
     * don't account for pfree's the caller is expected to do), so we cannot
     * rely on availMem in a disk sort. This does not seem worth the overhead
     * to fix. Is it worth creating an API for the memory context code to
     * tell us how much is actually used in sortcontext?
     */
    if (state->tapeset)
        spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
    else
        spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
    switch (state->status)
    {
        case TSS_SORTEDINMEM:
            if (state->boundUsed)
                snprintf(result, 100,
                         "Sort Method: top-N heapsort Memory: %ldkB",
                         spaceUsed);
            else
                snprintf(result, 100,
                         "Sort Method: quicksort Memory: %ldkB",
                         spaceUsed);
            break;
        case TSS_SORTEDONTAPE:
            snprintf(result, 100,
                     "Sort Method: external sort Disk: %ldkB",
                     spaceUsed);
            break;
        case TSS_FINALMERGE:
            snprintf(result, 100,
                     "Sort Method: external merge Disk: %ldkB",
                     spaceUsed);
            break;
        default:
            snprintf(result, 100, "sort still in progress");
            break;
    }

    return result;
}
/*
 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
 *
 * Compare two SortTuples. If checkIndex is true, use the tuple index
 * as the front of the sort key; otherwise, no.
 */

#define HEAPCOMPARE(tup1,tup2) \
    (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
     ((tup1)->tupindex) - ((tup2)->tupindex) : \
     COMPARETUP(state, tup1, tup2))
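/*
 * Illustrative aside, not part of this module: with checkIndex = true,
 * HEAPCOMPARE() orders entries first by tupindex (the run number during run
 * building) and only then by the real sort key, so an entry tagged with run
 * n+1 can never reach the top of the heap while any run-n entry remains.
 * The toy comparator below mirrors that two-level ordering for a struct of
 * plain ints; the type and function names are hypothetical, and the real
 * macro defers to COMPARETUP() for the key part.
 */
#if 0
struct toy_tuple
{
    int         tupindex;       /* run number during run building */
    int         key;            /* stand-in for the real sort key */
};

static int
toy_heapcompare(const struct toy_tuple *t1, const struct toy_tuple *t2)
{
    if (t1->tupindex != t2->tupindex)
        return t1->tupindex - t2->tupindex; /* lower run number sorts first */
    return (t1->key > t2->key) - (t1->key < t2->key);
}
#endif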
/*
 * Convert the existing unordered array of SortTuples to a bounded heap,
 * discarding all but the smallest "state->bound" tuples.
 *
 * When working with a bounded heap, we want to keep the largest entry
 * at the root (array entry zero), instead of the smallest as in the normal
 * sort case. This allows us to discard the largest entry cheaply.
 * Therefore, we temporarily reverse the sort direction.
 *
 * We assume that all entries in a bounded heap will always have tupindex
 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
 * the direction of comparison for tupindexes.
 */
static void
make_bounded_heap(Tuplesortstate *state)
{
    int         tupcount = state->memtupcount;
    int         i;

    Assert(state->status == TSS_INITIAL);
    Assert(state->bounded);
    Assert(tupcount >= state->bound);

    /* Reverse sort direction so largest entry will be at root */
    REVERSEDIRECTION(state);

    state->memtupcount = 0;     /* make the heap empty */
    for (i = 0; i < tupcount; i++)
    {
        if (state->memtupcount >= state->bound &&
            COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
        {
            /* New tuple would just get thrown out, so skip it */
            free_sort_tuple(state, &state->memtuples[i]);
        }
        else
        {
            /* Insert next tuple into heap */
            /* Must copy source tuple to avoid possible overwrite */
            SortTuple   stup = state->memtuples[i];

            tuplesort_heap_insert(state, &stup, 0, false);

            /* If heap too full, discard largest entry */
            if (state->memtupcount > state->bound)
            {
                free_sort_tuple(state, &state->memtuples[0]);
                tuplesort_heap_siftup(state, false);
            }
        }
    }

    Assert(state->memtupcount == state->bound);
    state->status = TSS_BOUNDED;
}
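/*
 * Illustrative aside, not part of this module: because the sort direction is
 * reversed while the bounded heap is in use, the *largest* of the bound
 * survivors sits at the root, so a new candidate either loses to it (and is
 * discarded immediately) or displaces something. The sketch below shows the
 * same idea for keeping the smallest "bound" ints with an explicit max-heap,
 * using the common replace-the-root formulation rather than insert-then-trim;
 * it assumes heap[] already holds "bound" (>= 1) values in max-heap order.
 * All names are hypothetical.
 */
#if 0
#include <stddef.h>

static void
toy_sift_down_max(int *heap, size_t n, size_t i)
{
    for (;;)
    {
        size_t      j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n && heap[j + 1] > heap[j])
            j++;                /* pick the larger child */
        if (heap[i] >= heap[j])
            break;
        {
            int         tmp = heap[i];

            heap[i] = heap[j];
            heap[j] = tmp;
        }
        i = j;
    }
}

/* Offer one candidate to a max-heap holding the "bound" smallest values. */
static void
toy_bounded_offer(int *heap, size_t bound, int candidate)
{
    if (candidate >= heap[0])
        return;                 /* would just get thrown out, so skip it */
    heap[0] = candidate;        /* replace the current largest survivor */
    toy_sift_down_max(heap, bound, 0);
}
#endif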
/*
 * Convert the bounded heap to a properly-sorted array
 */
static void
sort_bounded_heap(Tuplesortstate *state)
{
    int         tupcount = state->memtupcount;

    Assert(state->status == TSS_BOUNDED);
    Assert(state->bounded);
    Assert(tupcount == state->bound);

    /*
     * We can unheapify in place because each sift-up will remove the largest
     * entry, which we can promptly store in the newly freed slot at the end.
     * Once we're down to a single-entry heap, we're done.
     */
    while (state->memtupcount > 1)
    {
        SortTuple   stup = state->memtuples[0];

        /* this sifts-up the next-largest entry and decreases memtupcount */
        tuplesort_heap_siftup(state, false);
        state->memtuples[state->memtupcount] = stup;
    }
    state->memtupcount = tupcount;

    /*
     * Reverse sort direction back to the original state. This is not
     * actually necessary but seems like a good idea for tidiness.
     */
    REVERSEDIRECTION(state);

    state->status = TSS_SORTEDINMEM;
    state->boundUsed = true;
}
/*
 * Insert a new tuple into an empty or existing heap, maintaining the
 * heap invariant. Caller is responsible for ensuring there's room.
 *
 * Note: we assume *tuple is a temporary variable that can be scribbled on.
 * For some callers, tuple actually points to a memtuples[] entry above the
 * end of the heap. This is safe as long as it's not immediately adjacent
 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
 * is, it might get overwritten before being moved into the heap!
 */
static void
tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
                      int tupleindex, bool checkIndex)
{
    SortTuple  *memtuples;
    int         j;

    /*
     * Save the tupleindex --- see notes above about writing on *tuple. It's a
     * historical artifact that tupleindex is passed as a separate argument
     * and not in *tuple, but it's notationally convenient so let's leave it
     * that way.
     */
    tuple->tupindex = tupleindex;

    memtuples = state->memtuples;
    Assert(state->memtupcount < state->memtupsize);

    /*
     * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
     * using 1-based array indexes, not 0-based.
     */
    j = state->memtupcount++;
    while (j > 0)
    {
        int         i = (j - 1) >> 1;

        if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
            break;
        memtuples[j] = memtuples[i];
        j = i;
    }
    memtuples[j] = *tuple;
}
/*
 * The tuple at state->memtuples[0] has been removed from the heap.
 * Decrement memtupcount, and sift up to maintain the heap invariant.
 */
static void
tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
{
    SortTuple  *memtuples = state->memtuples;
    SortTuple  *tuple;
    int         i,
                n;

    if (--state->memtupcount <= 0)
        return;
    n = state->memtupcount;
    tuple = &memtuples[n];      /* tuple that must be reinserted */
    i = 0;                      /* i is where the "hole" is */
    for (;;)
    {
        int         j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n &&
            HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
            j++;
        if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
            break;
        memtuples[i] = memtuples[j];
        i = j;
    }
    memtuples[i] = *tuple;
}
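/*
 * Index-arithmetic note (illustrative): both heap routines use 0-based array
 * indexes, so the parent of slot j is (j - 1) >> 1 and the children of slot i
 * are 2*i + 1 and 2*i + 2, whereas Knuth's 1-based presentation uses
 * floor(j/2) and 2*i, 2*i + 1. For example, slot 4 has parent
 * (4 - 1) >> 1 = 1 and children 9 and 10.
 */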
/*
 * Tape interface routines
 */

static unsigned int
getlen(Tuplesortstate *state, int tapenum, bool eofOK)
{
    unsigned int len;

    if (LogicalTapeRead(state->tapeset, tapenum, (void *) &len,
                        sizeof(len)) != sizeof(len))
        elog(ERROR, "unexpected end of tape");
    if (len == 0 && !eofOK)
        elog(ERROR, "unexpected end of data");
    return len;
}
static void
markrunend(Tuplesortstate *state, int tapenum)
{
    unsigned int len = 0;

    LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
}
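/*
 * Illustrative aside, not part of this module: on tape, each tuple is framed
 * as a length word (which counts itself) followed by the tuple body, with an
 * extra trailing length word when randomAccess is set, and a zero length word
 * (written by markrunend) terminating each run; getlen() treats that zero as
 * end-of-run. The sketch below shows the same framing against a stdio FILE
 * instead of a logical tape, omitting the trailing length word. All names
 * are hypothetical.
 */
#if 0
#include <stdio.h>

static void
toy_write_item(FILE *fp, const void *body, unsigned int bodylen)
{
    unsigned int len = bodylen + sizeof(unsigned int);  /* counts itself */

    fwrite(&len, sizeof(len), 1, fp);
    fwrite(body, 1, bodylen, fp);
}

static void
toy_mark_run_end(FILE *fp)
{
    unsigned int len = 0;

    fwrite(&len, sizeof(len), 1, fp);
}

/* Returns the payload length read into buf, or 0 at end of run/file. */
static unsigned int
toy_read_item(FILE *fp, void *buf, unsigned int bufsize)
{
    unsigned int len;

    if (fread(&len, sizeof(len), 1, fp) != 1)
        return 0;               /* physical end of file */
    if (len == 0)
        return 0;               /* end-of-run marker */
    len -= sizeof(unsigned int);    /* stored length counts itself */
    if (len > bufsize || fread(buf, 1, len, fp) != len)
        return 0;               /* buffer too small or short read */
    return len;
}
#endif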
/*
 * Set up for an external caller of ApplySortFunction. This function
 * basically just exists to localize knowledge of the encoding of sk_flags
 * used in this module.
 */
void
SelectSortFunction(Oid sortOperator,
                   bool nulls_first,
                   Oid *sortFunction,
                   int *sortFlags)
{
    bool        reverse;

    if (!get_compare_function_for_ordering_op(sortOperator,
                                              sortFunction, &reverse))
        elog(ERROR, "operator %u is not a valid ordering operator",
             sortOperator);

    *sortFlags = reverse ? SK_BT_DESC : 0;
    if (nulls_first)
        *sortFlags |= SK_BT_NULLS_FIRST;
}
/*
 * Inline-able copy of FunctionCall2() to save some cycles in sorting.
 */
static inline Datum
myFunctionCall2(FmgrInfo *flinfo, Datum arg1, Datum arg2)
{
    FunctionCallInfoData fcinfo;
    Datum       result;

    InitFunctionCallInfoData(fcinfo, flinfo, 2, NULL, NULL);

    fcinfo.arg[0] = arg1;
    fcinfo.arg[1] = arg2;
    fcinfo.argnull[0] = false;
    fcinfo.argnull[1] = false;

    result = FunctionCallInvoke(&fcinfo);

    /* Check for null result, since caller is clearly not expecting one */
    if (fcinfo.isnull)
        elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid);

    return result;
}
/*
 * Apply a sort function (by now converted to fmgr lookup form)
 * and return a 3-way comparison result. This takes care of handling
 * reverse-sort and NULLs-ordering properly. We assume that DESC and
 * NULLS_FIRST options are encoded in sk_flags the same way btree does it.
 */
static inline int32
inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags,
                        Datum datum1, bool isNull1,
                        Datum datum2, bool isNull2)
{
    int32       compare;

    if (isNull1)
    {
        if (isNull2)
            compare = 0;        /* NULL "=" NULL */
        else if (sk_flags & SK_BT_NULLS_FIRST)
            compare = -1;       /* NULL "<" NOT_NULL */
        else
            compare = 1;        /* NULL ">" NOT_NULL */
    }
    else if (isNull2)
    {
        if (sk_flags & SK_BT_NULLS_FIRST)
            compare = 1;        /* NOT_NULL ">" NULL */
        else
            compare = -1;       /* NOT_NULL "<" NULL */
    }
    else
    {
        compare = DatumGetInt32(myFunctionCall2(sortFunction,
                                                datum1, datum2));

        if (sk_flags & SK_BT_DESC)
            compare = -compare;
    }

    return compare;
}
/*
 * Non-inline ApplySortFunction() --- this is needed only to conform to
 * C99's brain-dead notions about how to implement inline functions...
 */
int32
ApplySortFunction(FmgrInfo *sortFunction, int sortFlags,
                  Datum datum1, bool isNull1,
                  Datum datum2, bool isNull2)
{
    return inlineApplySortFunction(sortFunction, sortFlags,
                                   datum1, isNull1,
                                   datum2, isNull2);
}
/*
 * Routines specialized for HeapTuple (actually MinimalTuple) case
 */

static int
comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
    ScanKey     scanKey = state->scanKeys;
    HeapTupleData ltup;
    HeapTupleData rtup;
    TupleDesc   tupDesc;
    int         nkey;
    int32       compare;

    /* Allow interrupting long sorts */
    CHECK_FOR_INTERRUPTS();

    /* Compare the leading sort key */
    compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
                                      a->datum1, a->isnull1,
                                      b->datum1, b->isnull1);
    if (compare != 0)
        return compare;

    /* Compare additional sort keys */
    ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
    ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
    rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
    rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
    tupDesc = state->tupDesc;
    scanKey++;
    for (nkey = 1; nkey < state->nKeys; nkey++, scanKey++)
    {
        AttrNumber  attno = scanKey->sk_attno;
        Datum       datum1,
                    datum2;
        bool        isnull1,
                    isnull2;

        datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
        datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);

        compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
                                          datum1, isnull1,
                                          datum2, isnull2);
        if (compare != 0)
            return compare;
    }

    return 0;
}
static void
copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
{
    /*
     * We expect the passed "tup" to be a TupleTableSlot, and form a
     * MinimalTuple using the exported interface for that.
     */
    TupleTableSlot *slot = (TupleTableSlot *) tup;
    MinimalTuple tuple;
    HeapTupleData htup;

    /* copy the tuple into sort storage */
    tuple = ExecCopySlotMinimalTuple(slot);
    stup->tuple = (void *) tuple;
    USEMEM(state, GetMemoryChunkSpace(tuple));
    /* set up first-column key value */
    htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
    htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
    stup->datum1 = heap_getattr(&htup,
                                state->scanKeys[0].sk_attno,
                                state->tupDesc,
                                &stup->isnull1);
}
static void
writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
    MinimalTuple tuple = (MinimalTuple) stup->tuple;
    /* the part of the MinimalTuple we'll write: */
    char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
    unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
    /* total on-disk footprint: */
    unsigned int tuplen = tupbodylen + sizeof(int);

    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) &tuplen, sizeof(tuplen));
    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) tupbody, tupbodylen);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeWrite(state->tapeset, tapenum,
                         (void *) &tuplen, sizeof(tuplen));

    FREEMEM(state, GetMemoryChunkSpace(tuple));
    heap_free_minimal_tuple(tuple);
}
static void
readtup_heap(Tuplesortstate *state, SortTuple *stup,
             int tapenum, unsigned int len)
{
    unsigned int tupbodylen = len - sizeof(int);
    unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
    MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
    char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
    HeapTupleData htup;

    USEMEM(state, GetMemoryChunkSpace(tuple));
    /* read in the tuple proper */
    tuple->t_len = tuplen;
    if (LogicalTapeRead(state->tapeset, tapenum,
                        (void *) tupbody,
                        tupbodylen) != (size_t) tupbodylen)
        elog(ERROR, "unexpected end of data");
    if (state->randomAccess)    /* need trailing length word? */
        if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
                            sizeof(tuplen)) != sizeof(tuplen))
            elog(ERROR, "unexpected end of data");
    stup->tuple = (void *) tuple;
    /* set up first-column key value */
    htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
    htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
    stup->datum1 = heap_getattr(&htup,
                                state->scanKeys[0].sk_attno,
                                state->tupDesc,
                                &stup->isnull1);
}
static void
reversedirection_heap(Tuplesortstate *state)
{
    ScanKey     scanKey = state->scanKeys;
    int         nkey;

    for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
    {
        scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
    }
}
/*
 * Routines specialized for IndexTuple case
 *
 * The btree and hash cases require separate comparison functions, but the
 * IndexTuple representation is the same so the copy/write/read support
 * functions can be shared.
 */

static int
comparetup_index_btree(const SortTuple *a, const SortTuple *b,
                       Tuplesortstate *state)
{
    /*
     * This is similar to _bt_tuplecompare(), but we have already done the
     * index_getattr calls for the first column, and we need to keep track of
     * whether any null fields are present. Also see the special treatment
     * for equal keys at the end.
     */
    ScanKey     scanKey = state->indexScanKey;
    IndexTuple  tuple1;
    IndexTuple  tuple2;
    int         keysz;
    TupleDesc   tupDes;
    bool        equal_hasnull = false;
    int         nkey;
    int32       compare;

    /* Allow interrupting long sorts */
    CHECK_FOR_INTERRUPTS();
    /* Compare the leading sort key */
    compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
                                      a->datum1, a->isnull1,
                                      b->datum1, b->isnull1);
    if (compare != 0)
        return compare;

    /* they are equal, so we only need to examine one null flag */
    if (a->isnull1)
        equal_hasnull = true;

    /* Compare additional sort keys */
    tuple1 = (IndexTuple) a->tuple;
    tuple2 = (IndexTuple) b->tuple;
    keysz = state->nKeys;
    tupDes = RelationGetDescr(state->indexRel);
    scanKey++;
    for (nkey = 2; nkey <= keysz; nkey++, scanKey++)
    {
        Datum       datum1,
                    datum2;
        bool        isnull1,
                    isnull2;

        datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
        datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);

        compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
                                          datum1, isnull1,
                                          datum2, isnull2);
        if (compare != 0)
            return compare;     /* done when we find unequal attributes */

        /* they are equal, so we only need to examine one null flag */
        if (isnull1)
            equal_hasnull = true;
    }
    /*
     * If btree has asked us to enforce uniqueness, complain if two equal
     * tuples are detected (unless there was at least one NULL field).
     *
     * It is sufficient to make the test here, because if two tuples are equal
     * they *must* get compared at some stage of the sort --- otherwise the
     * sort algorithm wouldn't have checked whether one must appear before the
     * other.
     *
     * Some rather brain-dead implementations of qsort will sometimes call the
     * comparison routine to compare a value to itself. (At this writing only
     * QNX 4 is known to do such silly things; we don't support QNX anymore,
     * but perhaps the behavior still exists elsewhere.) Don't raise a bogus
     * error in that case.
     */
    if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
        ereport(ERROR,
                (errcode(ERRCODE_UNIQUE_VIOLATION),
                 errmsg("could not create unique index \"%s\"",
                        RelationGetRelationName(state->indexRel)),
                 errdetail("Table contains duplicated values.")));
    /*
     * If key values are equal, we sort on ItemPointer. This does not affect
     * validity of the finished index, but it offers cheap insurance against
     * performance problems with bad qsort implementations that have trouble
     * with large numbers of equal keys.
     */
    {
        BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
        BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

        if (blk1 != blk2)
            return (blk1 < blk2) ? -1 : 1;
    }
    {
        OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
        OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

        if (pos1 != pos2)
            return (pos1 < pos2) ? -1 : 1;
    }

    return 0;
}
static int
comparetup_index_hash(const SortTuple *a, const SortTuple *b,
                      Tuplesortstate *state)
{
    uint32      hash1;
    uint32      hash2;
    IndexTuple  tuple1;
    IndexTuple  tuple2;

    /* Allow interrupting long sorts */
    CHECK_FOR_INTERRUPTS();

    /*
     * Fetch hash keys and mask off bits we don't want to sort by.
     * We know that the first column of the index tuple is the hash key.
     */
    Assert(!a->isnull1);
    hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
    Assert(!b->isnull1);
    hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
    if (hash1 > hash2)
        return 1;
    else if (hash1 < hash2)
        return -1;

    /*
     * If hash values are equal, we sort on ItemPointer. This does not affect
     * validity of the finished index, but it offers cheap insurance against
     * performance problems with bad qsort implementations that have trouble
     * with large numbers of equal keys.
     */
    tuple1 = (IndexTuple) a->tuple;
    tuple2 = (IndexTuple) b->tuple;

    {
        BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
        BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

        if (blk1 != blk2)
            return (blk1 < blk2) ? -1 : 1;
    }
    {
        OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
        OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

        if (pos1 != pos2)
            return (pos1 < pos2) ? -1 : 1;
    }

    return 0;
}
static void
copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
{
    IndexTuple  tuple = (IndexTuple) tup;
    unsigned int tuplen = IndexTupleSize(tuple);
    IndexTuple  newtuple;

    /* copy the tuple into sort storage */
    newtuple = (IndexTuple) palloc(tuplen);
    memcpy(newtuple, tuple, tuplen);
    USEMEM(state, GetMemoryChunkSpace(newtuple));
    stup->tuple = (void *) newtuple;
    /* set up first-column key value */
    stup->datum1 = index_getattr(newtuple,
                                 1,
                                 RelationGetDescr(state->indexRel),
                                 &stup->isnull1);
}
static void
writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
    IndexTuple  tuple = (IndexTuple) stup->tuple;
    unsigned int tuplen;

    tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) &tuplen, sizeof(tuplen));
    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) tuple, IndexTupleSize(tuple));
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeWrite(state->tapeset, tapenum,
                         (void *) &tuplen, sizeof(tuplen));

    FREEMEM(state, GetMemoryChunkSpace(tuple));
    pfree(tuple);
}
static void
readtup_index(Tuplesortstate *state, SortTuple *stup,
              int tapenum, unsigned int len)
{
    unsigned int tuplen = len - sizeof(unsigned int);
    IndexTuple  tuple = (IndexTuple) palloc(tuplen);

    USEMEM(state, GetMemoryChunkSpace(tuple));
    if (LogicalTapeRead(state->tapeset, tapenum, (void *) tuple,
                        tuplen) != tuplen)
        elog(ERROR, "unexpected end of data");
    if (state->randomAccess)    /* need trailing length word? */
        if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
                            sizeof(tuplen)) != sizeof(tuplen))
            elog(ERROR, "unexpected end of data");
    stup->tuple = (void *) tuple;
    /* set up first-column key value */
    stup->datum1 = index_getattr(tuple,
                                 1,
                                 RelationGetDescr(state->indexRel),
                                 &stup->isnull1);
}
static void
reversedirection_index_btree(Tuplesortstate *state)
{
    ScanKey     scanKey = state->indexScanKey;
    int         nkey;

    for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
    {
        scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
    }
}

static void
reversedirection_index_hash(Tuplesortstate *state)
{
    /* We don't support reversing direction in a hash index sort */
    elog(ERROR, "reversedirection_index_hash is not implemented");
}
/*
 * Routines specialized for DatumTuple case
 */

static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
    /* Allow interrupting long sorts */
    CHECK_FOR_INTERRUPTS();

    return inlineApplySortFunction(&state->sortOpFn, state->sortFnFlags,
                                   a->datum1, a->isnull1,
                                   b->datum1, b->isnull1);
}
static void
copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
{
    /* Not currently needed */
    elog(ERROR, "copytup_datum() should not be called");
}
static void
writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
    void       *waddr;
    unsigned int tuplen;
    unsigned int writtenlen;

    if (stup->isnull1)
    {
        waddr = NULL;
        tuplen = 0;
    }
    else if (state->datumTypeByVal)
    {
        waddr = &stup->datum1;
        tuplen = sizeof(Datum);
    }
    else
    {
        waddr = DatumGetPointer(stup->datum1);
        tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen);
        Assert(tuplen != 0);
    }

    writtenlen = tuplen + sizeof(unsigned int);

    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) &writtenlen, sizeof(writtenlen));
    LogicalTapeWrite(state->tapeset, tapenum,
                     waddr, tuplen);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeWrite(state->tapeset, tapenum,
                         (void *) &writtenlen, sizeof(writtenlen));

    if (stup->tuple)
    {
        FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
        pfree(stup->tuple);
    }
}
static void
readtup_datum(Tuplesortstate *state, SortTuple *stup,
              int tapenum, unsigned int len)
{
    unsigned int tuplen = len - sizeof(unsigned int);

    if (tuplen == 0)
    {
        /* it's NULL */
        stup->datum1 = (Datum) 0;
        stup->isnull1 = true;
        stup->tuple = NULL;
    }
    else if (state->datumTypeByVal)
    {
        Assert(tuplen == sizeof(Datum));
        if (LogicalTapeRead(state->tapeset, tapenum, (void *) &stup->datum1,
                            tuplen) != tuplen)
            elog(ERROR, "unexpected end of data");
        stup->isnull1 = false;
        stup->tuple = NULL;
    }
    else
    {
        void       *raddr = palloc(tuplen);

        if (LogicalTapeRead(state->tapeset, tapenum, raddr,
                            tuplen) != tuplen)
            elog(ERROR, "unexpected end of data");
        stup->datum1 = PointerGetDatum(raddr);
        stup->isnull1 = false;
        stup->tuple = raddr;
        USEMEM(state, GetMemoryChunkSpace(raddr));
    }

    if (state->randomAccess)    /* need trailing length word? */
        if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
                            sizeof(tuplen)) != sizeof(tuplen))
            elog(ERROR, "unexpected end of data");
}
static void
reversedirection_datum(Tuplesortstate *state)
{
    state->sortFnFlags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
}
/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */
static void
free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
{
    FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
    pfree(stup->tuple);
}