/*-------------------------------------------------------------------------
 *
 * tuplesort.c
 *	  Generalized tuple sorting routines.
 *
 * This module handles sorting of heap tuples, index tuples, or single
 * Datums (and could easily support other kinds of sortable objects,
 * if necessary).  It works efficiently for both small and large amounts
 * of data.  Small amounts are sorted in-memory using qsort().  Large
 * amounts are sorted using temporary files and a standard external sort
 * algorithm.
 *
 * See Knuth, volume 3, for more than you want to know about the external
 * sorting algorithm.  We divide the input into sorted runs using replacement
 * selection, in the form of a priority tree implemented as a heap
 * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
 * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
 * are implemented by logtape.c, which avoids space wastage by recycling
 * disk space as soon as each block is read from its "tape".
 *
 * We do not form the initial runs using Knuth's recommended replacement
 * selection data structure (Algorithm 5.4.1R), because it uses a fixed
 * number of records in memory at all times.  Since we are dealing with
 * tuples that may vary considerably in size, we want to be able to vary
 * the number of records kept in memory to ensure full utilization of the
 * allowed sort memory space.  So, we keep the tuples in a variable-size
 * heap, with the next record to go out at the top of the heap.  Like
 * Algorithm 5.4.1R, each record is stored with the run number that it
 * must go into, and we use (run number, key) as the ordering key for the
 * heap.  When the run number at the top of the heap changes, we know that
 * no more records of the prior run are left in the heap.
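 *
 * As an illustration of that ordering rule (a sketch, not code lifted from
 * this file), the heap comparison with run numbers included is essentially:
 *
 *		if (checkIndex && a->tupindex != b->tupindex)
 *			return (a->tupindex < b->tupindex) ? -1 : 1;
 *		return COMPARETUP(state, a, b);
 *
 * so a tuple tagged for a later run can never rise above tuples that still
 * belong to the current run.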
 *
 * The approximate amount of memory allowed for any one sort operation
 * is specified in kilobytes by the caller (most pass work_mem).  Initially,
 * we absorb tuples and simply store them in an unsorted array as long as
 * we haven't exceeded workMem.  If we reach the end of the input without
 * exceeding workMem, we sort the array using qsort() and subsequently return
 * tuples just by scanning the tuple array sequentially.  If we do exceed
 * workMem, we construct a heap using Algorithm H and begin to emit tuples
 * into sorted runs in temporary tapes, emitting just enough tuples at each
 * step to get back within the workMem limit.  Whenever the run number at
 * the top of the heap changes, we begin a new run with a new output tape
 * (selected per Algorithm D).  After the end of the input is reached,
 * we dump out remaining tuples in memory into a final run (or two),
 * then merge the runs using Algorithm D.
 *
 * When merging runs, we use a heap containing just the frontmost tuple from
 * each source run; we repeatedly output the smallest tuple and insert the
 * next tuple from its source tape (if any).  When the heap empties, the merge
 * is complete.  The basic merge algorithm thus needs very little memory ---
 * only M tuples for an M-way merge, and M is constrained to a small number.
 * However, we can still make good use of our full workMem allocation by
 * pre-reading additional tuples from each source tape.  Without prereading,
 * our access pattern to the temporary file would be very erratic; on average
 * we'd read one block from each of M source tapes during the same time that
 * we're writing M blocks to the output tape, so there is no sequentiality of
 * access at all, defeating the read-ahead methods used by most Unix kernels.
 * Worse, the output tape gets written into a very random sequence of blocks
 * of the temp file, ensuring that things will be even worse when it comes
 * time to read that tape.  A straightforward merge pass thus ends up doing a
 * lot of waiting for disk seeks.  We can improve matters by prereading from
 * each source tape sequentially, loading about workMem/M bytes from each tape
 * in turn.  Then we run the merge algorithm, writing but not reading until
 * one of the preloaded tuple series runs out.  Then we switch back to preread
 * mode, fill memory again, and repeat.  This approach helps to localize both
 * read and write accesses.
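 *
 * As a rough illustration of the preread granularity (example figures, not
 * code from this file): with workMem = 8MB and a 7-way merge, each input
 * tape gets on the order of 8MB/7, a bit over 1MB, loaded per preread cycle,
 * so the temp file is accessed in megabyte-sized sequential chunks rather
 * than one block (typically 8kB) at a time.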
 *
 * When the caller requests random access to the sort result, we form
 * the final sorted run on a logical tape which is then "frozen", so
 * that we can access it randomly.  When the caller does not need random
 * access, we return from tuplesort_performsort() as soon as we are down
 * to one run per logical tape.  The final merge is then performed
 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
 * saves one cycle of writing all the data out to disk and reading it in.
 *
 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
 * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
 * tape drives are expensive beasts, and in particular that there will always
 * be many more runs than tape drives.  In our implementation a "tape drive"
 * doesn't cost much more than a few Kb of memory buffers, so we can afford
 * to have lots of them.  In particular, if we can have as many tape drives
 * as sorted runs, we can eliminate any repeated I/O at all.  In the current
 * code we determine the number of tapes M on the basis of workMem: we want
 * workMem/M to be large enough that we read a fair amount of data each time
 * we preread from a tape, so as to maintain the locality of access described
 * above.  Nonetheless, with large workMem we can have many tapes.
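 *
 * For example (assuming the default 8kB block size and the constants defined
 * below; these are illustrative figures, not additional code):
 * tuplesort_merge_order() charges roughly 35 blocks of buffer and workspace
 * per input tape, so workMem = 64MB yields a merge order of a couple hundred
 * tapes --- usually enough that the initial runs from even a very large
 * input can be merged in a single pass.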
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>

#include "access/genam.h"
#include "access/nbtree.h"
#include "catalog/pg_amop.h"
#include "catalog/pg_operator.h"
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
/* sort-type codes for sort__start probes */
#define HEAP_SORT		0
#define INDEX_SORT		1
#define DATUM_SORT		2

/* GUC variables */
bool		trace_sort = false;

#ifdef DEBUG_BOUNDED_SORT
bool		optimize_bounded_sort = true;
#endif
/*
 * The objects we actually sort are SortTuple structs.  These contain
 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
 * which is a separate palloc chunk --- we assume it is just one chunk and
 * can be freed by a simple pfree().  SortTuples also contain the tuple's
 * first key column in Datum/nullflag format, and an index integer.
 *
 * Storing the first key column lets us save heap_getattr or index_getattr
 * calls during tuple comparisons.  We could extract and save all the key
 * columns not just the first, but this would increase code complexity and
 * overhead, and wouldn't actually save any comparison cycles in the common
 * case where the first key determines the comparison result.  Note that
 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
 *
 * When sorting single Datums, the data value is represented directly by
 * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
 * then datum1 points to a separately palloc'd data value that is also pointed
 * to by the "tuple" pointer; otherwise "tuple" is NULL.
 *
 * While building initial runs, tupindex holds the tuple's run number.  During
 * merge passes, we re-use it to hold the input tape number that each tuple in
 * the heap was read from, or to hold the index of the next tuple pre-read
 * from the same tape in the case of pre-read entries.  tupindex goes unused
 * if the sort occurs entirely in memory.
 */
typedef struct
{
	void	   *tuple;			/* the tuple proper */
	Datum		datum1;			/* value of first key column */
	bool		isnull1;		/* is first key column NULL? */
	int			tupindex;		/* see notes above */
} SortTuple;
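
/*
 * Illustrative sketch (not literal code from this file): because datum1 and
 * isnull1 are cached in the SortTuple, a comparator can usually resolve a
 * comparison on the first key column without fetching any attributes:
 *
 *		compare = ApplySortFunction(&scankey->sk_func, scankey->sk_flags,
 *									a->datum1, a->isnull1,
 *									b->datum1, b->isnull1);
 *		if (compare != 0 || state->nKeys == 1)
 *			return compare;
 *		(only then deconstruct the tuples to compare the remaining columns)
 */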
/*
 * Possible states of a Tuplesort object.  These denote the states that
 * persist between calls of Tuplesort routines.
 */
typedef enum
{
	TSS_INITIAL,				/* Loading tuples; still within memory limit */
	TSS_BOUNDED,				/* Loading tuples into bounded-size heap */
	TSS_BUILDRUNS,				/* Loading tuples; writing to tape */
	TSS_SORTEDINMEM,			/* Sort completed entirely in memory */
	TSS_SORTEDONTAPE,			/* Sort completed, final run is on tape */
	TSS_FINALMERGE				/* Performing final merge on-the-fly */
} TupSortStatus;
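
/*
 * Typical state progressions (a summary drawn from the discussion at the
 * top of the file, not an exhaustive state machine): an in-memory sort goes
 * TSS_INITIAL -> TSS_SORTEDINMEM; a bounded sort goes TSS_INITIAL ->
 * TSS_BOUNDED -> TSS_SORTEDINMEM; a sort that spills to disk goes
 * TSS_INITIAL -> TSS_BUILDRUNS and then ends in TSS_SORTEDONTAPE (when
 * randomAccess is requested, or when only one run was produced) or in
 * TSS_FINALMERGE (final merge performed on the fly).
 */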
/*
 * Parameters for calculation of number of tapes to use --- see inittapes()
 * and tuplesort_merge_order().
 *
 * In this calculation we assume that each tape will cost us about 3 blocks
 * worth of buffer space (which is an underestimate for very large data
 * volumes, but it's probably close enough --- see logtape.c).
 *
 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
 * tape during a preread cycle (see discussion at top of file).
 */
#define MINORDER		6		/* minimum merge order */
#define TAPE_BUFFER_OVERHEAD		(BLCKSZ * 3)
#define MERGE_BUFFER_SIZE			(BLCKSZ * 32)
/*
 * Private state of a Tuplesort operation.
 */
struct Tuplesortstate
{
	TupSortStatus status;		/* enumerated value as shown above */
	int			nKeys;			/* number of columns in sort key */
	bool		randomAccess;	/* did caller request random access? */
	bool		bounded;		/* did caller specify a maximum number of
								 * tuples to return? */
	bool		boundUsed;		/* true if we made use of a bounded heap */
	int			bound;			/* if bounded, the maximum number of tuples */
	long		availMem;		/* remaining memory available, in bytes */
	long		allowedMem;		/* total memory allowed, in bytes */
	int			maxTapes;		/* number of tapes (Knuth's T) */
	int			tapeRange;		/* maxTapes-1 (Knuth's P) */
	MemoryContext sortcontext;	/* memory context holding all sort data */
	LogicalTapeSet *tapeset;	/* logtape.c object for tapes in a temp file */

	/*
	 * These function pointers decouple the routines that must know what kind
	 * of tuple we are sorting from the routines that don't need to know it.
	 * They are set up by the tuplesort_begin_xxx routines.
	 *
	 * Function to compare two tuples; result is per qsort() convention, ie:
	 * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
	 * qsort_arg_comparator.
	 */
	int			(*comparetup) (const SortTuple *a, const SortTuple *b,
							   Tuplesortstate *state);

	/*
	 * Function to copy a supplied input tuple into palloc'd space and set up
	 * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
	 * state->availMem must be decreased by the amount of space used for the
	 * tuple copy (note the SortTuple struct itself is not counted).
	 */
	void		(*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);

	/*
	 * Function to write a stored tuple onto tape.  The representation of the
	 * tuple on tape need not be the same as it is in memory; requirements on
	 * the tape representation are given below.  After writing the tuple,
	 * pfree() the out-of-line data (not the SortTuple struct!), and increase
	 * state->availMem by the amount of memory space thereby released.
	 */
	void		(*writetup) (Tuplesortstate *state, int tapenum,
							 SortTuple *stup);

	/*
	 * Function to read a stored tuple from tape back into memory.  'len' is
	 * the already-read length of the stored tuple.  Create a palloc'd copy,
	 * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
	 * decrease state->availMem by the amount of memory space consumed.
	 */
	void		(*readtup) (Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);

	/*
	 * Function to reverse the sort direction from its current state.  (We
	 * could dispense with this if we wanted to enforce that all variants
	 * represent the sort key information alike.)
	 */
	void		(*reversedirection) (Tuplesortstate *state);

	/*
	 * This array holds the tuples now in sort memory.  If we are in state
	 * INITIAL, the tuples are in no particular order; if we are in state
	 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
	 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
	 * H.  (Note that memtupcount only counts the tuples that are part of the
	 * heap --- during merge passes, memtuples[] entries beyond tapeRange are
	 * never in the heap and are used to hold pre-read tuples.)  In state
	 * SORTEDONTAPE, the array is not used.
	 */
	SortTuple  *memtuples;		/* array of SortTuple structs */
	int			memtupcount;	/* number of tuples currently present */
	int			memtupsize;		/* allocated length of memtuples array */

	/*
	 * While building initial runs, this is the current output run number
	 * (starting at 0).  Afterwards, it is the number of initial runs we made.
	 */
	int			currentRun;

	/*
	 * Unless otherwise noted, all pointer variables below are pointers to
	 * arrays of length maxTapes, holding per-tape data.
	 */

	/*
	 * These variables are only used during merge passes.  mergeactive[i] is
	 * true if we are reading an input run from (actual) tape number i and
	 * have not yet exhausted that run.  mergenext[i] is the memtuples index
	 * of the next pre-read tuple (next to be loaded into the heap) for tape
	 * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
	 * points to the last pre-read tuple from each tape.  mergeavailslots[i]
	 * is the number of unused memtuples[] slots reserved for tape i, and
	 * mergeavailmem[i] is the amount of unused space allocated for tape i.
	 * mergefreelist and mergefirstfree keep track of unused locations in the
	 * memtuples[] array.  The memtuples[].tupindex fields link together
	 * pre-read tuples for each tape as well as recycled locations in
	 * mergefreelist.  It is OK to use 0 as a null link in these lists, because
	 * memtuples[0] is part of the merge heap and is never a pre-read tuple.
	 */
	bool	   *mergeactive;	/* active input run source? */
	int		   *mergenext;		/* first preread tuple for each source */
	int		   *mergelast;		/* last preread tuple for each source */
	int		   *mergeavailslots;	/* slots left for prereading each tape */
	long	   *mergeavailmem;	/* availMem for prereading each tape */
	int			mergefreelist;	/* head of freelist of recycled slots */
	int			mergefirstfree; /* first slot never used in this merge */

	/*
	 * Variables for Algorithm D.  Note that destTape is a "logical" tape
	 * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
	 * "logical" and "actual" tape numbers straight!
	 */
	int			Level;			/* Knuth's l */
	int			destTape;		/* current output tape (Knuth's j, less 1) */
	int		   *tp_fib;			/* Target Fibonacci run counts (A[]) */
	int		   *tp_runs;		/* # of real runs on each tape */
	int		   *tp_dummy;		/* # of dummy runs for each tape (D[]) */
	int		   *tp_tapenum;		/* Actual tape numbers (TAPE[]) */
	int			activeTapes;	/* # of active input tapes in merge pass */

	/*
	 * These variables are used after completion of sorting to keep track of
	 * the next tuple to return.  (In the tape case, the tape's current read
	 * position is also critical state.)
	 */
	int			result_tape;	/* actual tape number of finished output */
	int			current;		/* array index (only used if SORTEDINMEM) */
	bool		eof_reached;	/* reached EOF (needed for cursors) */

	/* markpos_xxx holds marked position for mark and restore */
	long		markpos_block;	/* tape block# (only used if SORTEDONTAPE) */
	int			markpos_offset; /* saved "current", or offset in tape block */
	bool		markpos_eof;	/* saved "eof_reached" */

	/*
	 * These variables are specific to the MinimalTuple case; they are set by
	 * tuplesort_begin_heap and used only by the MinimalTuple routines.
	 */
	TupleDesc	tupDesc;
	ScanKey		scanKeys;		/* array of length nKeys */

	/*
	 * These variables are specific to the IndexTuple case; they are set by
	 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
	 */
	Relation	indexRel;		/* index being built */

	/* These are specific to the index_btree subcase: */
	ScanKey		indexScanKey;
	bool		enforceUnique;	/* complain if we find duplicate tuples */

	/* These are specific to the index_hash subcase: */
	uint32		hash_mask;		/* mask for sortable part of hash code */

	/*
	 * These variables are specific to the Datum case; they are set by
	 * tuplesort_begin_datum and used only by the DatumTuple routines.
	 */
	Oid			datumType;
	FmgrInfo	sortOpFn;		/* cached lookup data for sortOperator */
	int			sortFnFlags;	/* equivalent to sk_flags */
	/* we need typelen and byval in order to know how to copy the Datums. */
	int			datumTypeLen;
	bool		datumTypeByVal;

	/*
	 * Resource snapshot for time of sort start.
	 */
	PGRUsage	ru_start;
};
#define COMPARETUP(state,a,b)	((*(state)->comparetup) (a, b, state))
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)	((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define REVERSEDIRECTION(state) ((*(state)->reversedirection) (state))
#define LACKMEM(state)		((state)->availMem < 0)
#define USEMEM(state,amt)	((state)->availMem -= (amt))
#define FREEMEM(state,amt)	((state)->availMem += (amt))
/*
 * NOTES about on-tape representation of tuples:
 *
 * We require the first "unsigned int" of a stored tuple to be the total size
 * on-tape of the tuple, including itself (so it is never zero; an all-zero
 * unsigned int is used to delimit runs).  The remainder of the stored tuple
 * may or may not match the in-memory representation of the tuple ---
 * any conversion needed is the job of the writetup and readtup routines.
 *
 * If state->randomAccess is true, then the stored representation of the
 * tuple must be followed by another "unsigned int" that is a copy of the
 * length --- so the total tape space used is actually sizeof(unsigned int)
 * more than the stored length value.  This allows read-backwards.  When
 * randomAccess is not true, the write/read routines may omit the extra
 * length word.
 *
 * writetup is expected to write both length words as well as the tuple
 * data.  When readtup is called, the tape is positioned just after the
 * front length word; readtup must read the tuple data and advance past
 * the back length word (if present).
 *
 * The write/read routines can make use of the tuple description data
 * stored in the Tuplesortstate record, if needed.  They are also expected
 * to adjust state->availMem by the amount of memory space (not tape space!)
 * released or consumed.  There is no error return from either writetup
 * or readtup; they should ereport() on failure.
 */
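
/*
 * As an illustration of the framing just described (a sketch assuming
 * "tuplen" bytes of tuple body sit at "tupbody"; this is not code from this
 * file), a writetup routine does roughly:
 *
 *	unsigned int writelen = tuplen + sizeof(unsigned int);
 *
 *	LogicalTapeWrite(state->tapeset, tapenum,
 *					 (void *) &writelen, sizeof(writelen));
 *	LogicalTapeWrite(state->tapeset, tapenum,
 *					 (void *) tupbody, tuplen);
 *	if (state->randomAccess)
 *		LogicalTapeWrite(state->tapeset, tapenum,
 *						 (void *) &writelen, sizeof(writelen));
 */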
/*
 * NOTES about memory consumption calculations:
 *
 * We count space allocated for tuples against the workMem limit, plus
 * the space used by the variable-size memtuples array.  Fixed-size space
 * is not counted; it's small enough to not be interesting.
 *
 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
 * rather than the originally-requested size.  This is important since
 * palloc can add substantial overhead.  It's not a complete answer since
 * we won't count any wasted space in palloc allocation blocks, but it's
 * a lot better than what we were doing before 7.3.
 */
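
/*
 * Illustrative sketch of that accounting convention (not code from this
 * file): any routine that allocates or releases tuple storage adjusts
 * availMem by the allocator's actual chunk size, e.g.
 *
 *	tuple = (void *) palloc(tuplen);
 *	USEMEM(state, GetMemoryChunkSpace(tuple));
 *	...
 *	FREEMEM(state, GetMemoryChunkSpace(tuple));
 *	pfree(tuple);
 *
 * and LACKMEM(state) becomes true as soon as availMem has gone negative.
 */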
static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
static void inittapes(Tuplesortstate *state);
static void selectnewtape(Tuplesortstate *state);
static void mergeruns(Tuplesortstate *state);
static void mergeonerun(Tuplesortstate *state);
static void beginmerge(Tuplesortstate *state);
static void mergepreread(Tuplesortstate *state);
static void mergeprereadone(Tuplesortstate *state, int srcTape);
static void dumptuples(Tuplesortstate *state, bool alltuples);
static void make_bounded_heap(Tuplesortstate *state);
static void sort_bounded_heap(Tuplesortstate *state);
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					  int tupleindex, bool checkIndex);
static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int comparetup_heap(const SortTuple *a, const SortTuple *b,
				Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
			  SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
			 int tapenum, unsigned int len);
static void reversedirection_heap(Tuplesortstate *state);
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state);
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					  Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_index_btree(Tuplesortstate *state);
static void reversedirection_index_hash(Tuplesortstate *state);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
				 Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_datum(Tuplesortstate *state);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
/*
 *		tuplesort_begin_xxx
 *
 * Initialize for a tuple sort operation.
 *
 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
 * zero or more times, then call tuplesort_performsort when all the tuples
 * have been supplied.  After performsort, retrieve the tuples in sorted
 * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
 * access was requested, rescan, markpos, and restorepos can also be called.)
 * Call tuplesort_end to terminate the operation and release memory/disk space.
 *
 * Each variant of tuplesort_begin has a workMem parameter specifying the
 * maximum number of kilobytes of RAM to use before spilling data to disk.
 * (The normal value of this parameter is work_mem, but some callers use
 * other values.)  Each variant also has a randomAccess parameter specifying
 * whether the caller needs non-sequential access to the sort result.
 */
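
/*
 * Illustrative calling sequence for the heap-tuple case (a sketch; the
 * caller-side variables tupDesc, nkeys, attNums, sortOperators,
 * nullsFirstFlags, and slot are assumed, not defined here):
 *
 *	Tuplesortstate *sortstate;
 *
 *	sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *									 sortOperators, nullsFirstFlags,
 *									 work_mem, false);
 *	while ( ... next input tuple placed in slot ... )
 *		tuplesort_puttupleslot(sortstate, slot);
 *	tuplesort_performsort(sortstate);
 *	while (tuplesort_gettupleslot(sortstate, true, slot))
 *		 ... consume the sorted tuple in slot ... ;
 *	tuplesort_end(sortstate);
 */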
static Tuplesortstate *
tuplesort_begin_common(int workMem, bool randomAccess)
{
	Tuplesortstate *state;
	MemoryContext sortcontext;
	MemoryContext oldcontext;

	/*
	 * Create a working memory context for this sort operation.  All data
	 * needed by the sort will live inside this context.
	 */
	sortcontext = AllocSetContextCreate(CurrentMemoryContext,
										"TupleSort",
										ALLOCSET_DEFAULT_MINSIZE,
										ALLOCSET_DEFAULT_INITSIZE,
										ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * Make the Tuplesortstate within the per-sort context.  This way, we
	 * don't need a separate pfree() operation for it at shutdown.
	 */
	oldcontext = MemoryContextSwitchTo(sortcontext);

	state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

	if (trace_sort)
		pg_rusage_init(&state->ru_start);

	state->status = TSS_INITIAL;
	state->randomAccess = randomAccess;
	state->bounded = false;
	state->boundUsed = false;
	state->allowedMem = workMem * 1024L;
	state->availMem = state->allowedMem;
	state->sortcontext = sortcontext;
	state->tapeset = NULL;

	state->memtupcount = 0;
	state->memtupsize = 1024;	/* initial guess */
	state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

	USEMEM(state, GetMemoryChunkSpace(state->memtuples));

	/* workMem must be large enough for the minimal memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");

	state->currentRun = 0;

	/*
	 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
	 * inittapes(), if needed
	 */

	state->result_tape = -1;	/* flag that result tape has not been formed */

	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
					 int nkeys, AttrNumber *attNums,
					 Oid *sortOperators, bool *nullsFirstFlags,
					 int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;
	int			i;

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

	AssertArg(nkeys > 0);

	if (trace_sort)
		elog(LOG,
			 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
			 nkeys, workMem, randomAccess ? 't' : 'f');

	state->nKeys = nkeys;

	TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
								false,	/* no unique check */
								nkeys, workMem, randomAccess);

	state->comparetup = comparetup_heap;
	state->copytup = copytup_heap;
	state->writetup = writetup_heap;
	state->readtup = readtup_heap;
	state->reversedirection = reversedirection_heap;

	state->tupDesc = tupDesc;	/* assume we need not copy tupDesc */
	state->scanKeys = (ScanKey) palloc0(nkeys * sizeof(ScanKeyData));

	for (i = 0; i < nkeys; i++)
	{
		Oid			sortFunction;
		bool		reverse;

		AssertArg(attNums[i] != 0);
		AssertArg(sortOperators[i] != 0);

		if (!get_compare_function_for_ordering_op(sortOperators[i],
												  &sortFunction, &reverse))
			elog(ERROR, "operator %u is not a valid ordering operator",
				 sortOperators[i]);

		/*
		 * We needn't fill in sk_strategy or sk_subtype since these scankeys
		 * will never be passed to an index.
		 */
		ScanKeyInit(&state->scanKeys[i],
					attNums[i],
					InvalidStrategy,
					sortFunction,
					(Datum) 0);

		/* However, we use btree's conventions for encoding directionality */
		if (reverse)
			state->scanKeys[i].sk_flags |= SK_BT_DESC;
		if (nullsFirstFlags[i])
			state->scanKeys[i].sk_flags |= SK_BT_NULLS_FIRST;
	}

	MemoryContextSwitchTo(oldcontext);

	return state;
}
631 tuplesort_begin_index_btree(Relation indexRel
,
633 int workMem
, bool randomAccess
)
635 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
636 MemoryContext oldcontext
;
638 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
643 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
644 enforceUnique
? 't' : 'f',
645 workMem
, randomAccess
? 't' : 'f');
648 state
->nKeys
= RelationGetNumberOfAttributes(indexRel
);
650 TRACE_POSTGRESQL_SORT_START(INDEX_SORT
,
656 state
->comparetup
= comparetup_index_btree
;
657 state
->copytup
= copytup_index
;
658 state
->writetup
= writetup_index
;
659 state
->readtup
= readtup_index
;
660 state
->reversedirection
= reversedirection_index_btree
;
662 state
->indexRel
= indexRel
;
663 state
->indexScanKey
= _bt_mkscankey_nodata(indexRel
);
664 state
->enforceUnique
= enforceUnique
;
666 MemoryContextSwitchTo(oldcontext
);
672 tuplesort_begin_index_hash(Relation indexRel
,
674 int workMem
, bool randomAccess
)
676 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
677 MemoryContext oldcontext
;
679 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
684 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
686 workMem
, randomAccess
? 't' : 'f');
689 state
->nKeys
= 1; /* Only one sort column, the hash code */
691 state
->comparetup
= comparetup_index_hash
;
692 state
->copytup
= copytup_index
;
693 state
->writetup
= writetup_index
;
694 state
->readtup
= readtup_index
;
695 state
->reversedirection
= reversedirection_index_hash
;
697 state
->indexRel
= indexRel
;
699 state
->hash_mask
= hash_mask
;
701 MemoryContextSwitchTo(oldcontext
);
707 tuplesort_begin_datum(Oid datumType
,
708 Oid sortOperator
, bool nullsFirstFlag
,
709 int workMem
, bool randomAccess
)
711 Tuplesortstate
*state
= tuplesort_begin_common(workMem
, randomAccess
);
712 MemoryContext oldcontext
;
718 oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
723 "begin datum sort: workMem = %d, randomAccess = %c",
724 workMem
, randomAccess
? 't' : 'f');
727 state
->nKeys
= 1; /* always a one-column sort */
729 TRACE_POSTGRESQL_SORT_START(DATUM_SORT
,
730 false, /* no unique check */
735 state
->comparetup
= comparetup_datum
;
736 state
->copytup
= copytup_datum
;
737 state
->writetup
= writetup_datum
;
738 state
->readtup
= readtup_datum
;
739 state
->reversedirection
= reversedirection_datum
;
741 state
->datumType
= datumType
;
743 /* lookup the ordering function */
744 if (!get_compare_function_for_ordering_op(sortOperator
,
745 &sortFunction
, &reverse
))
746 elog(ERROR
, "operator %u is not a valid ordering operator",
748 fmgr_info(sortFunction
, &state
->sortOpFn
);
750 /* set ordering flags */
751 state
->sortFnFlags
= reverse
? SK_BT_DESC
: 0;
753 state
->sortFnFlags
|= SK_BT_NULLS_FIRST
;
755 /* lookup necessary attributes of the datum type */
756 get_typlenbyval(datumType
, &typlen
, &typbyval
);
757 state
->datumTypeLen
= typlen
;
758 state
->datumTypeByVal
= typbyval
;
760 MemoryContextSwitchTo(oldcontext
);
/*
 * tuplesort_set_bound
 *
 *	Advise tuplesort that at most the first N result tuples are required.
 *
 * Must be called before inserting any tuples.  (Actually, we could allow it
 * as long as the sort hasn't spilled to disk, but there seems no need for
 * delayed calls at the moment.)
 *
 * This is a hint only.  The tuplesort may still return more tuples than
 * requested.
 */
void
tuplesort_set_bound(Tuplesortstate *state, int64 bound)
{
780 /* Assert we're called before loading any tuples */
781 Assert(state
->status
== TSS_INITIAL
);
782 Assert(state
->memtupcount
== 0);
783 Assert(!state
->bounded
);
785 #ifdef DEBUG_BOUNDED_SORT
786 /* Honor GUC setting that disables the feature (for easy testing) */
787 if (!optimize_bounded_sort
)
791 /* We want to be able to compute bound * 2, so limit the setting */
792 if (bound
> (int64
) (INT_MAX
/ 2))
795 state
->bounded
= true;
796 state
->bound
= (int) bound
;
/*
 * tuplesort_end
 *
 *	Release resources and clean up.
 *
 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
 * pointing to garbage.  Be careful not to attempt to use or free such
 * pointers afterwards!
 */
void
tuplesort_end(Tuplesortstate *state)
{
	/* context swap probably not needed, but let's be safe */
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
818 spaceUsed
= LogicalTapeSetBlocks(state
->tapeset
);
820 spaceUsed
= (state
->allowedMem
- state
->availMem
+ 1023) / 1024;
824 * Delete temporary "tape" files, if any.
826 * Note: want to include this in reported total cost of sort, hence need
827 * for two #ifdef TRACE_SORT sections.
830 LogicalTapeSetClose(state
->tapeset
);
836 elog(LOG
, "external sort ended, %ld disk blocks used: %s",
837 spaceUsed
, pg_rusage_show(&state
->ru_start
));
839 elog(LOG
, "internal sort ended, %ld KB used: %s",
840 spaceUsed
, pg_rusage_show(&state
->ru_start
));
843 TRACE_POSTGRESQL_SORT_DONE(state
->tapeset
!= NULL
, spaceUsed
);
847 * If you disabled TRACE_SORT, you can still probe sort__done, but you
848 * ain't getting space-used stats.
850 TRACE_POSTGRESQL_SORT_DONE(state
->tapeset
!= NULL
, 0L);
853 MemoryContextSwitchTo(oldcontext
);
856 * Free the per-sort memory context, thereby releasing all working memory,
857 * including the Tuplesortstate struct itself.
859 MemoryContextDelete(state
->sortcontext
);
/*
 * Grow the memtuples[] array, if possible within our memory constraint.
 * Return TRUE if able to enlarge the array, FALSE if not.
 *
 * At each increment we double the size of the array.  When we are short
 * on memory we could consider smaller increases, but because availMem
 * moves around with tuple addition/removal, this might result in thrashing.
 * Small increases in the array size are likely to be pretty inefficient.
 */
static bool
grow_memtuples(Tuplesortstate *state)
{
	/*
	 * We need to be sure that we do not cause LACKMEM to become true, else
	 * the space management algorithm will go nuts.  We assume here that the
	 * memory chunk overhead associated with the memtuples array is constant
	 * and so there will be no unexpected addition to what we ask for.  (The
	 * minimum array size established in tuplesort_begin_common is large
	 * enough to force palloc to treat it as a separate chunk, so this
	 * assumption should be good.  But let's check it.)
	 */
	if (state->availMem <= (long) (state->memtupsize * sizeof(SortTuple)))
		return false;

	/*
	 * On a 64-bit machine, allowedMem could be high enough to get us into
	 * trouble with MaxAllocSize, too.
	 */
	if ((Size) (state->memtupsize * 2) >= MaxAllocSize / sizeof(SortTuple))
		return false;

	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
	state->memtupsize *= 2;
	state->memtuples = (SortTuple *)
		repalloc(state->memtuples,
				 state->memtupsize * sizeof(SortTuple));
	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation during sort");
	return true;
}
/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) slot);

	puttuple_common(state, &stup);

	MemoryContextSwitchTo(oldcontext);
}
927 * Accept one index tuple while collecting input data for sort.
929 * Note that the input tuple is always copied; the caller need not save it.
932 tuplesort_putindextuple(Tuplesortstate
*state
, IndexTuple tuple
)
934 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
938 * Copy the given tuple into memory we control, and decrease availMem.
939 * Then call the common code.
941 COPYTUP(state
, &stup
, (void *) tuple
);
943 puttuple_common(state
, &stup
);
945 MemoryContextSwitchTo(oldcontext
);
949 * Accept one Datum while collecting input data for sort.
951 * If the Datum is pass-by-ref type, the value will be copied.
954 tuplesort_putdatum(Tuplesortstate
*state
, Datum val
, bool isNull
)
956 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
960 * If it's a pass-by-reference value, copy it into memory we control, and
961 * decrease availMem. Then call the common code.
963 if (isNull
|| state
->datumTypeByVal
)
966 stup
.isnull1
= isNull
;
967 stup
.tuple
= NULL
; /* no separate storage */
971 stup
.datum1
= datumCopy(val
, false, state
->datumTypeLen
);
972 stup
.isnull1
= false;
973 stup
.tuple
= DatumGetPointer(stup
.datum1
);
974 USEMEM(state
, GetMemoryChunkSpace(stup
.tuple
));
977 puttuple_common(state
, &stup
);
979 MemoryContextSwitchTo(oldcontext
);
983 * Shared code for tuple and datum cases.
986 puttuple_common(Tuplesortstate
*state
, SortTuple
*tuple
)
988 switch (state
->status
)
993 * Save the tuple into the unsorted array. First, grow the array
994 * as needed. Note that we try to grow the array when there is
995 * still one free slot remaining --- if we fail, there'll still be
996 * room to store the incoming tuple, and then we'll switch to
997 * tape-based operation.
999 if (state
->memtupcount
>= state
->memtupsize
- 1)
1001 (void) grow_memtuples(state
);
1002 Assert(state
->memtupcount
< state
->memtupsize
);
1004 state
->memtuples
[state
->memtupcount
++] = *tuple
;
1007 * Check if it's time to switch over to a bounded heapsort. We do
1008 * so if the input tuple count exceeds twice the desired tuple
1009 * count (this is a heuristic for where heapsort becomes cheaper
1010 * than a quicksort), or if we've just filled workMem and have
1011 * enough tuples to meet the bound.
1013 * Note that once we enter TSS_BOUNDED state we will always try to
1014 * complete the sort that way. In the worst case, if later input
1015 * tuples are larger than earlier ones, this might cause us to
1016 * exceed workMem significantly.
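			 *
			 * As a concrete illustration (not code): with bound = 100, we
			 * switch to the bounded heap either when the 201st tuple
			 * arrives, or as soon as more than 100 tuples are in memory and
			 * LACKMEM(state) reports that workMem is used up.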
1018 if (state
->bounded
&&
1019 (state
->memtupcount
> state
->bound
* 2 ||
1020 (state
->memtupcount
> state
->bound
&& LACKMEM(state
))))
1024 elog(LOG
, "switching to bounded heapsort at %d tuples: %s",
1026 pg_rusage_show(&state
->ru_start
));
1028 make_bounded_heap(state
);
1033 * Done if we still fit in available memory and have array slots.
1035 if (state
->memtupcount
< state
->memtupsize
&& !LACKMEM(state
))
1039 * Nope; time to switch to tape-based operation.
1044 * Dump tuples until we are back under the limit.
1046 dumptuples(state
, false);
1052 * We don't want to grow the array here, so check whether the new
1053 * tuple can be discarded before putting it in. This should be a
1054 * good speed optimization, too, since when there are many more
1055 * input tuples than the bound, most input tuples can be discarded
1056 * with just this one comparison. Note that because we currently
1057 * have the sort direction reversed, we must check for <= not >=.
1059 if (COMPARETUP(state
, tuple
, &state
->memtuples
[0]) <= 0)
1061 /* new tuple <= top of the heap, so we can discard it */
1062 free_sort_tuple(state
, tuple
);
1066 /* discard top of heap, sift up, insert new tuple */
1067 free_sort_tuple(state
, &state
->memtuples
[0]);
1068 tuplesort_heap_siftup(state
, false);
1069 tuplesort_heap_insert(state
, tuple
, 0, false);
1076 * Insert the tuple into the heap, with run number currentRun if
1077 * it can go into the current run, else run number currentRun+1.
1078 * The tuple can go into the current run if it is >= the first
1079 * not-yet-output tuple. (Actually, it could go into the current
1080 * run if it is >= the most recently output tuple ... but that
1081 * would require keeping around the tuple we last output, and it's
1082 * simplest to let writetup free each tuple as soon as it's
1085 * Note there will always be at least one tuple in the heap at
1086 * this point; see dumptuples.
1088 Assert(state
->memtupcount
> 0);
1089 if (COMPARETUP(state
, tuple
, &state
->memtuples
[0]) >= 0)
1090 tuplesort_heap_insert(state
, tuple
, state
->currentRun
, true);
1092 tuplesort_heap_insert(state
, tuple
, state
->currentRun
+ 1, true);
1095 * If we are over the memory limit, dump tuples till we're under.
1097 dumptuples(state
, false);
1101 elog(ERROR
, "invalid tuplesort state");
1107 * All tuples have been provided; finish the sort.
1110 tuplesort_performsort(Tuplesortstate
*state
)
1112 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1116 elog(LOG
, "performsort starting: %s",
1117 pg_rusage_show(&state
->ru_start
));
1120 switch (state
->status
)
1125 * We were able to accumulate all the tuples within the allowed
1126 * amount of memory. Just qsort 'em and we're done.
1128 if (state
->memtupcount
> 1)
1129 qsort_arg((void *) state
->memtuples
,
1132 (qsort_arg_comparator
) state
->comparetup
,
1135 state
->eof_reached
= false;
1136 state
->markpos_offset
= 0;
1137 state
->markpos_eof
= false;
1138 state
->status
= TSS_SORTEDINMEM
;
1144 * We were able to accumulate all the tuples required for output
1145 * in memory, using a heap to eliminate excess tuples. Now we
1146 * have to transform the heap to a properly-sorted array.
1148 sort_bounded_heap(state
);
1150 state
->eof_reached
= false;
1151 state
->markpos_offset
= 0;
1152 state
->markpos_eof
= false;
1153 state
->status
= TSS_SORTEDINMEM
;
1159 * Finish tape-based sort. First, flush all tuples remaining in
1160 * memory out to tape; then merge until we have a single remaining
1161 * run (or, if !randomAccess, one run per tape). Note that
1162 * mergeruns sets the correct state->status.
1164 dumptuples(state
, true);
1166 state
->eof_reached
= false;
1167 state
->markpos_block
= 0L;
1168 state
->markpos_offset
= 0;
1169 state
->markpos_eof
= false;
1173 elog(ERROR
, "invalid tuplesort state");
1180 if (state
->status
== TSS_FINALMERGE
)
1181 elog(LOG
, "performsort done (except %d-way final merge): %s",
1183 pg_rusage_show(&state
->ru_start
));
1185 elog(LOG
, "performsort done: %s",
1186 pg_rusage_show(&state
->ru_start
));
1190 MemoryContextSwitchTo(oldcontext
);
1194 * Internal routine to fetch the next tuple in either forward or back
1195 * direction into *stup. Returns FALSE if no more tuples.
1196 * If *should_free is set, the caller must pfree stup.tuple when done with it.
1199 tuplesort_gettuple_common(Tuplesortstate
*state
, bool forward
,
1200 SortTuple
*stup
, bool *should_free
)
1202 unsigned int tuplen
;
1204 switch (state
->status
)
1206 case TSS_SORTEDINMEM
:
1207 Assert(forward
|| state
->randomAccess
);
1208 *should_free
= false;
1211 if (state
->current
< state
->memtupcount
)
1213 *stup
= state
->memtuples
[state
->current
++];
1216 state
->eof_reached
= true;
1219 * Complain if caller tries to retrieve more tuples than
1220 * originally asked for in a bounded sort. This is because
1221 * returning EOF here might be the wrong thing.
1223 if (state
->bounded
&& state
->current
>= state
->bound
)
1224 elog(ERROR
, "retrieved too many tuples in a bounded sort");
1230 if (state
->current
<= 0)
1234 * if all tuples are fetched already then we return last
1235 * tuple, else - tuple before last returned.
1237 if (state
->eof_reached
)
1238 state
->eof_reached
= false;
1241 state
->current
--; /* last returned tuple */
1242 if (state
->current
<= 0)
1245 *stup
= state
->memtuples
[state
->current
- 1];
1250 case TSS_SORTEDONTAPE
:
1251 Assert(forward
|| state
->randomAccess
);
1252 *should_free
= true;
1255 if (state
->eof_reached
)
1257 if ((tuplen
= getlen(state
, state
->result_tape
, true)) != 0)
1259 READTUP(state
, stup
, state
->result_tape
, tuplen
);
1264 state
->eof_reached
= true;
1272 * if all tuples are fetched already then we return last tuple,
1273 * else - tuple before last returned.
1275 if (state
->eof_reached
)
1278 * Seek position is pointing just past the zero tuplen at the
1279 * end of file; back up to fetch last tuple's ending length
1280 * word. If seek fails we must have a completely empty file.
1282 if (!LogicalTapeBackspace(state
->tapeset
,
1284 2 * sizeof(unsigned int)))
1286 state
->eof_reached
= false;
1291 * Back up and fetch previously-returned tuple's ending length
1292 * word. If seek fails, assume we are at start of file.
1294 if (!LogicalTapeBackspace(state
->tapeset
,
1296 sizeof(unsigned int)))
1298 tuplen
= getlen(state
, state
->result_tape
, false);
1301 * Back up to get ending length word of tuple before it.
1303 if (!LogicalTapeBackspace(state
->tapeset
,
1305 tuplen
+ 2 * sizeof(unsigned int)))
1308 * If that fails, presumably the prev tuple is the first
1309 * in the file. Back up so that it becomes next to read
1310 * in forward direction (not obviously right, but that is
1311 * what in-memory case does).
1313 if (!LogicalTapeBackspace(state
->tapeset
,
1315 tuplen
+ sizeof(unsigned int)))
1316 elog(ERROR
, "bogus tuple length in backward scan");
1321 tuplen
= getlen(state
, state
->result_tape
, false);
1324 * Now we have the length of the prior tuple, back up and read it.
1325 * Note: READTUP expects we are positioned after the initial
1326 * length word of the tuple, so back up to that point.
1328 if (!LogicalTapeBackspace(state
->tapeset
,
1331 elog(ERROR
, "bogus tuple length in backward scan");
1332 READTUP(state
, stup
, state
->result_tape
, tuplen
);
1335 case TSS_FINALMERGE
:
1337 *should_free
= true;
1340 * This code should match the inner loop of mergeonerun().
1342 if (state
->memtupcount
> 0)
1344 int srcTape
= state
->memtuples
[0].tupindex
;
1349 *stup
= state
->memtuples
[0];
1350 /* returned tuple is no longer counted in our memory space */
1353 tuplen
= GetMemoryChunkSpace(stup
->tuple
);
1354 state
->availMem
+= tuplen
;
1355 state
->mergeavailmem
[srcTape
] += tuplen
;
1357 tuplesort_heap_siftup(state
, false);
1358 if ((tupIndex
= state
->mergenext
[srcTape
]) == 0)
1361 * out of preloaded data on this tape, try to read more
1363 * Unlike mergeonerun(), we only preload from the single
1364 * tape that's run dry. See mergepreread() comments.
1366 mergeprereadone(state
, srcTape
);
1369 * if still no data, we've reached end of run on this tape
1371 if ((tupIndex
= state
->mergenext
[srcTape
]) == 0)
1374 /* pull next preread tuple from list, insert in heap */
1375 newtup
= &state
->memtuples
[tupIndex
];
1376 state
->mergenext
[srcTape
] = newtup
->tupindex
;
1377 if (state
->mergenext
[srcTape
] == 0)
1378 state
->mergelast
[srcTape
] = 0;
1379 tuplesort_heap_insert(state
, newtup
, srcTape
, false);
1380 /* put the now-unused memtuples entry on the freelist */
1381 newtup
->tupindex
= state
->mergefreelist
;
1382 state
->mergefreelist
= tupIndex
;
1383 state
->mergeavailslots
[srcTape
]++;
1389 elog(ERROR
, "invalid tuplesort state");
1390 return false; /* keep compiler quiet */
1395 * Fetch the next tuple in either forward or back direction.
1396 * If successful, put tuple in slot and return TRUE; else, clear the slot
1400 tuplesort_gettupleslot(Tuplesortstate
*state
, bool forward
,
1401 TupleTableSlot
*slot
)
1403 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1407 if (!tuplesort_gettuple_common(state
, forward
, &stup
, &should_free
))
1410 MemoryContextSwitchTo(oldcontext
);
1414 ExecStoreMinimalTuple((MinimalTuple
) stup
.tuple
, slot
, should_free
);
1419 ExecClearTuple(slot
);
1425 * Fetch the next index tuple in either forward or back direction.
1426 * Returns NULL if no more tuples. If *should_free is set, the
1427 * caller must pfree the returned tuple when done with it.
1430 tuplesort_getindextuple(Tuplesortstate
*state
, bool forward
,
1433 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1436 if (!tuplesort_gettuple_common(state
, forward
, &stup
, should_free
))
1439 MemoryContextSwitchTo(oldcontext
);
1441 return (IndexTuple
) stup
.tuple
;
1445 * Fetch the next Datum in either forward or back direction.
1446 * Returns FALSE if no more datums.
1448 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1449 * and is now owned by the caller.
1452 tuplesort_getdatum(Tuplesortstate
*state
, bool forward
,
1453 Datum
*val
, bool *isNull
)
1455 MemoryContext oldcontext
= MemoryContextSwitchTo(state
->sortcontext
);
1459 if (!tuplesort_gettuple_common(state
, forward
, &stup
, &should_free
))
1461 MemoryContextSwitchTo(oldcontext
);
1465 if (stup
.isnull1
|| state
->datumTypeByVal
)
1468 *isNull
= stup
.isnull1
;
1475 *val
= datumCopy(stup
.datum1
, false, state
->datumTypeLen
);
1479 MemoryContextSwitchTo(oldcontext
);
/*
 * tuplesort_merge_order - report merge order we'll use for given memory
 * (note: "merge order" just means the number of input tapes in the merge).
 *
 * This is exported for use by the planner.  allowedMem is in bytes.
 */
int
tuplesort_merge_order(long allowedMem)
{
	int			mOrder;

	/*
	 * We need one tape for each merge input, plus another one for the output,
	 * and each of these tapes needs buffer space.  In addition we want
	 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
	 * need one).
	 *
	 * Note: you might be thinking we need to account for the memtuples[]
	 * array in this calculation, but we effectively treat that as part of the
	 * MERGE_BUFFER_SIZE workspace.
	 */
	mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
		(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

	/* Even in minimum memory, use at least a MINORDER merge */
	mOrder = Max(mOrder, MINORDER);

	return mOrder;
}
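
/*
 * Worked example for the formula above (assuming the default BLCKSZ of
 * 8192 bytes, so TAPE_BUFFER_OVERHEAD is 24kB and MERGE_BUFFER_SIZE is
 * 256kB; illustrative figures, not additional code):
 *
 *	allowedMem = 16MB  =>  (16MB - 24kB) / 280kB  ~= 58-way merge
 *	allowedMem = 1MB   =>  formula yields 3, clamped up to MINORDER (6)
 */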
1515 * inittapes - initialize for tape sorting.
1517 * This is called only if we have found we don't have room to sort in memory.
1520 inittapes(Tuplesortstate
*state
)
1527 /* Compute number of tapes to use: merge order plus 1 */
1528 maxTapes
= tuplesort_merge_order(state
->allowedMem
) + 1;
1531 * We must have at least 2*maxTapes slots in the memtuples[] array, else
1532 * we'd not have room for merge heap plus preread. It seems unlikely that
1533 * this case would ever occur, but be safe.
1535 maxTapes
= Min(maxTapes
, state
->memtupsize
/ 2);
1537 state
->maxTapes
= maxTapes
;
1538 state
->tapeRange
= maxTapes
- 1;
1542 elog(LOG
, "switching to external sort with %d tapes: %s",
1543 maxTapes
, pg_rusage_show(&state
->ru_start
));
1547 * Decrease availMem to reflect the space needed for tape buffers; but
1548 * don't decrease it to the point that we have no room for tuples. (That
1549 * case is only likely to occur if sorting pass-by-value Datums; in all
1550 * other scenarios the memtuples[] array is unlikely to occupy more than
1551 * half of allowedMem. In the pass-by-value case it's not important to
1552 * account for tuple space, so we don't care if LACKMEM becomes
1555 tapeSpace
= maxTapes
* TAPE_BUFFER_OVERHEAD
;
1556 if (tapeSpace
+ GetMemoryChunkSpace(state
->memtuples
) < state
->allowedMem
)
1557 USEMEM(state
, tapeSpace
);
1560 * Make sure that the temp file(s) underlying the tape set are created in
1561 * suitable temp tablespaces.
1563 PrepareTempTablespaces();
1566 * Create the tape set and allocate the per-tape data arrays.
1568 state
->tapeset
= LogicalTapeSetCreate(maxTapes
);
1570 state
->mergeactive
= (bool *) palloc0(maxTapes
* sizeof(bool));
1571 state
->mergenext
= (int *) palloc0(maxTapes
* sizeof(int));
1572 state
->mergelast
= (int *) palloc0(maxTapes
* sizeof(int));
1573 state
->mergeavailslots
= (int *) palloc0(maxTapes
* sizeof(int));
1574 state
->mergeavailmem
= (long *) palloc0(maxTapes
* sizeof(long));
1575 state
->tp_fib
= (int *) palloc0(maxTapes
* sizeof(int));
1576 state
->tp_runs
= (int *) palloc0(maxTapes
* sizeof(int));
1577 state
->tp_dummy
= (int *) palloc0(maxTapes
* sizeof(int));
1578 state
->tp_tapenum
= (int *) palloc0(maxTapes
* sizeof(int));
1581 * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
1582 * marked as belonging to run number zero.
1584 * NOTE: we pass false for checkIndex since there's no point in comparing
1585 * indexes in this step, even though we do intend the indexes to be part
1586 * of the sort key...
1588 ntuples
= state
->memtupcount
;
1589 state
->memtupcount
= 0; /* make the heap empty */
1590 for (j
= 0; j
< ntuples
; j
++)
1592 /* Must copy source tuple to avoid possible overwrite */
1593 SortTuple stup
= state
->memtuples
[j
];
1595 tuplesort_heap_insert(state
, &stup
, 0, false);
1597 Assert(state
->memtupcount
== ntuples
);
1599 state
->currentRun
= 0;
1602 * Initialize variables of Algorithm D (step D1).
1604 for (j
= 0; j
< maxTapes
; j
++)
1606 state
->tp_fib
[j
] = 1;
1607 state
->tp_runs
[j
] = 0;
1608 state
->tp_dummy
[j
] = 1;
1609 state
->tp_tapenum
[j
] = j
;
1611 state
->tp_fib
[state
->tapeRange
] = 0;
1612 state
->tp_dummy
[state
->tapeRange
] = 0;
1615 state
->destTape
= 0;
1617 state
->status
= TSS_BUILDRUNS
;
1621 * selectnewtape -- select new tape for new initial run.
1623 * This is called after finishing a run when we know another run
1624 * must be started. This implements steps D3, D4 of Algorithm D.
1627 selectnewtape(Tuplesortstate
*state
)
1632 /* Step D3: advance j (destTape) */
1633 if (state
->tp_dummy
[state
->destTape
] < state
->tp_dummy
[state
->destTape
+ 1])
1638 if (state
->tp_dummy
[state
->destTape
] != 0)
1640 state
->destTape
= 0;
1644 /* Step D4: increase level */
1646 a
= state
->tp_fib
[0];
1647 for (j
= 0; j
< state
->tapeRange
; j
++)
1649 state
->tp_dummy
[j
] = a
+ state
->tp_fib
[j
+ 1] - state
->tp_fib
[j
];
1650 state
->tp_fib
[j
] = a
+ state
->tp_fib
[j
+ 1];
1652 state
->destTape
= 0;
1656 * mergeruns -- merge all the completed initial runs.
1658 * This implements steps D5, D6 of Algorithm D. All input data has
1659 * already been written to initial runs on tape (see dumptuples).
1662 mergeruns(Tuplesortstate
*state
)
1669 Assert(state
->status
== TSS_BUILDRUNS
);
1670 Assert(state
->memtupcount
== 0);
1673 * If we produced only one initial run (quite likely if the total data
1674 * volume is between 1X and 2X workMem), we can just use that tape as the
1675 * finished output, rather than doing a useless merge. (This obvious
1676 * optimization is not in Knuth's algorithm.)
1678 if (state
->currentRun
== 1)
1680 state
->result_tape
= state
->tp_tapenum
[state
->destTape
];
1681 /* must freeze and rewind the finished output tape */
1682 LogicalTapeFreeze(state
->tapeset
, state
->result_tape
);
1683 state
->status
= TSS_SORTEDONTAPE
;
1687 /* End of step D2: rewind all output tapes to prepare for merging */
1688 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1689 LogicalTapeRewind(state
->tapeset
, tapenum
, false);
1694 * At this point we know that tape[T] is empty. If there's just one
1695 * (real or dummy) run left on each input tape, then only one merge
1696 * pass remains. If we don't have to produce a materialized sorted
1697 * tape, we can stop at this point and do the final merge on-the-fly.
1699 if (!state
->randomAccess
)
1701 bool allOneRun
= true;
1703 Assert(state
->tp_runs
[state
->tapeRange
] == 0);
1704 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1706 if (state
->tp_runs
[tapenum
] + state
->tp_dummy
[tapenum
] != 1)
1714 /* Tell logtape.c we won't be writing anymore */
1715 LogicalTapeSetForgetFreeSpace(state
->tapeset
);
1716 /* Initialize for the final merge pass */
1718 state
->status
= TSS_FINALMERGE
;
1723 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
1724 while (state
->tp_runs
[state
->tapeRange
- 1] ||
1725 state
->tp_dummy
[state
->tapeRange
- 1])
1727 bool allDummy
= true;
1729 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1731 if (state
->tp_dummy
[tapenum
] == 0)
1740 state
->tp_dummy
[state
->tapeRange
]++;
1741 for (tapenum
= 0; tapenum
< state
->tapeRange
; tapenum
++)
1742 state
->tp_dummy
[tapenum
]--;
1748 /* Step D6: decrease level */
1749 if (--state
->Level
== 0)
1751 /* rewind output tape T to use as new input */
1752 LogicalTapeRewind(state
->tapeset
, state
->tp_tapenum
[state
->tapeRange
],
1754 /* rewind used-up input tape P, and prepare it for write pass */
1755 LogicalTapeRewind(state
->tapeset
, state
->tp_tapenum
[state
->tapeRange
- 1],
1757 state
->tp_runs
[state
->tapeRange
- 1] = 0;
1760 * reassign tape units per step D6; note we no longer care about A[]
1762 svTape
= state
->tp_tapenum
[state
->tapeRange
];
1763 svDummy
= state
->tp_dummy
[state
->tapeRange
];
1764 svRuns
= state
->tp_runs
[state
->tapeRange
];
1765 for (tapenum
= state
->tapeRange
; tapenum
> 0; tapenum
--)
1767 state
->tp_tapenum
[tapenum
] = state
->tp_tapenum
[tapenum
- 1];
1768 state
->tp_dummy
[tapenum
] = state
->tp_dummy
[tapenum
- 1];
1769 state
->tp_runs
[tapenum
] = state
->tp_runs
[tapenum
- 1];
1771 state
->tp_tapenum
[0] = svTape
;
1772 state
->tp_dummy
[0] = svDummy
;
1773 state
->tp_runs
[0] = svRuns
;
1777 * Done. Knuth says that the result is on TAPE[1], but since we exited
1778 * the loop without performing the last iteration of step D6, we have not
1779 * rearranged the tape unit assignment, and therefore the result is on
1780 * TAPE[T]. We need to do it this way so that we can freeze the final
1781 * output tape while rewinding it. The last iteration of step D6 would be
1782 * a waste of cycles anyway...
1784 state
->result_tape
= state
->tp_tapenum
[state
->tapeRange
];
1785 LogicalTapeFreeze(state
->tapeset
, state
->result_tape
);
1786 state
->status
= TSS_SORTEDONTAPE
;
1790 * Merge one run from each input tape, except ones with dummy runs.
1792 * This is the inner loop of Algorithm D step D5. We know that the
1793 * output tape is TAPE[T].
1796 mergeonerun(Tuplesortstate
*state
)
1798 int destTape
= state
->tp_tapenum
[state
->tapeRange
];
1806 * Start the merge by loading one tuple from each active source tape into
1807 * the heap. We can also decrease the input run/dummy run counts.
1812 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
1813 * out, and replacing it with next tuple from same tape (if there is
1816 while (state
->memtupcount
> 0)
		/* write the tuple to destTape */
		priorAvail = state->availMem;
		srcTape = state->memtuples[0].tupindex;
		WRITETUP(state, destTape, &state->memtuples[0]);
		/* writetup adjusted total free space, now fix per-tape space */
		spaceFreed = state->availMem - priorAvail;
		state->mergeavailmem[srcTape] += spaceFreed;
		/* compact the heap */
		tuplesort_heap_siftup(state, false);
		if ((tupIndex = state->mergenext[srcTape]) == 0)
		{
			/* out of preloaded data on this tape, try to read more */
			mergepreread(state);
			/* if still no data, we've reached end of run on this tape */
			if ((tupIndex = state->mergenext[srcTape]) == 0)
				continue;
		}
		/* pull next preread tuple from list, insert in heap */
		tup = &state->memtuples[tupIndex];
		state->mergenext[srcTape] = tup->tupindex;
		if (state->mergenext[srcTape] == 0)
			state->mergelast[srcTape] = 0;
		tuplesort_heap_insert(state, tup, srcTape, false);
		/* put the now-unused memtuples entry on the freelist */
		tup->tupindex = state->mergefreelist;
		state->mergefreelist = tupIndex;
		state->mergeavailslots[srcTape]++;
	}
	/*
	 * When the heap empties, we're done.  Write an end-of-run marker on the
	 * output tape, and increment its count of real runs.
	 */
	markrunend(state, destTape);
	state->tp_runs[state->tapeRange]++;

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
			 pg_rusage_show(&state->ru_start));
#endif
}
/*
 * beginmerge - initialize for a merge pass
 *
 * We decrease the counts of real and dummy runs for each tape, and mark
 * which tapes contain active input runs in mergeactive[].  Then, load
 * as many tuples as we can from each active input tape, and finally
 * fill the merge heap with the first tuple from each active tape.
 */
static void
beginmerge(Tuplesortstate *state)
{
	int			activeTapes;
	int			tapenum;
	int			srcTape;
	int			slotsPerTape;
	long		spacePerTape;
	/* Heap should be empty here */
	Assert(state->memtupcount == 0);

	/* Adjust run counts and mark the active tapes */
	memset(state->mergeactive, 0,
		   state->maxTapes * sizeof(*state->mergeactive));
	activeTapes = 0;
	for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
	{
		if (state->tp_dummy[tapenum] > 0)
			state->tp_dummy[tapenum]--;
		else
		{
			Assert(state->tp_runs[tapenum] > 0);
			state->tp_runs[tapenum]--;
			srcTape = state->tp_tapenum[tapenum];
			state->mergeactive[srcTape] = true;
			activeTapes++;
		}
	}
	state->activeTapes = activeTapes;
	/* Clear merge-pass state variables */
	memset(state->mergenext, 0,
		   state->maxTapes * sizeof(*state->mergenext));
	memset(state->mergelast, 0,
		   state->maxTapes * sizeof(*state->mergelast));
	state->mergefreelist = 0;	/* nothing in the freelist */
	state->mergefirstfree = activeTapes;	/* 1st slot avail for preread */
	/*
	 * Initialize space allocation to let each active input tape have an equal
	 * share of preread space.
	 */
	Assert(activeTapes > 0);
	slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
	Assert(slotsPerTape > 0);
	spacePerTape = state->availMem / activeTapes;
	for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
	{
		if (state->mergeactive[srcTape])
		{
			state->mergeavailslots[srcTape] = slotsPerTape;
			state->mergeavailmem[srcTape] = spacePerTape;
		}
	}

	/*
	 * Preread as many tuples as possible (and at least one) from each active
	 * tape.
	 */
	mergepreread(state);
	/* Load the merge heap with the first tuple from each input tape */
	for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
	{
		int			tupIndex = state->mergenext[srcTape];
		SortTuple  *tup;

		if (tupIndex)
		{
			tup = &state->memtuples[tupIndex];
			state->mergenext[srcTape] = tup->tupindex;
			if (state->mergenext[srcTape] == 0)
				state->mergelast[srcTape] = 0;
			tuplesort_heap_insert(state, tup, srcTape, false);
			/* put the now-unused memtuples entry on the freelist */
			tup->tupindex = state->mergefreelist;
			state->mergefreelist = tupIndex;
			state->mergeavailslots[srcTape]++;
		}
	}
}
/*
 * mergepreread - load tuples from merge input tapes
 *
 * This routine exists to improve sequentiality of reads during a merge pass,
 * as explained in the header comments of this file.  Load tuples from each
 * active source tape until the tape's run is exhausted or it has used up
 * its fair share of available memory.  In any case, we guarantee that there
 * is at least one preread tuple available from each unexhausted input tape.
 *
 * We invoke this routine at the start of a merge pass for initial load,
 * and then whenever any tape's preread data runs out.  Note that we load
 * as much data as possible from all tapes, not just the one that ran out.
 * This is because logtape.c works best with a usage pattern that alternates
 * between reading a lot of data and writing a lot of data, so whenever we
 * are forced to read, we should fill working memory completely.
 *
 * In FINALMERGE state, we *don't* use this routine, but instead just preread
 * from the single tape that ran dry.  There's no read/write alternation in
 * that state and so no point in scanning through all the tapes to fix one.
 * (Moreover, there may be quite a lot of inactive tapes in that state, since
 * we might have had many fewer runs than tapes.  In a regular tape-to-tape
 * merge we can expect most of the tapes to be active.)
 */
static void
mergepreread(Tuplesortstate *state)
{
	int			srcTape;

	for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
		mergeprereadone(state, srcTape);
}
/*
 * mergeprereadone - load tuples from one merge input tape
 *
 * Read tuples from the specified tape until it has used up its free memory
 * or array slots; but ensure that we have at least one tuple, if any are
 * to be had.
 */
static void
mergeprereadone(Tuplesortstate *state, int srcTape)
{
	unsigned int tuplen;
	SortTuple	stup;
	int			tupIndex;
	long		priorAvail,
				spaceUsed;

	if (!state->mergeactive[srcTape])
		return;					/* tape's run is already exhausted */
	priorAvail = state->availMem;
	state->availMem = state->mergeavailmem[srcTape];
	while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
		   state->mergenext[srcTape] == 0)
	{
		/* read next tuple, if any */
		if ((tuplen = getlen(state, srcTape, true)) == 0)
		{
			state->mergeactive[srcTape] = false;
			break;
		}
		READTUP(state, &stup, srcTape, tuplen);
		/* find a free slot in memtuples[] for it */
		tupIndex = state->mergefreelist;
		if (tupIndex)
			state->mergefreelist = state->memtuples[tupIndex].tupindex;
		else
		{
			tupIndex = state->mergefirstfree++;
			Assert(tupIndex < state->memtupsize);
		}
		state->mergeavailslots[srcTape]--;
		/* store tuple, append to list for its tape */
		state->memtuples[tupIndex] = stup;
		if (state->mergelast[srcTape])
			state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
		else
			state->mergenext[srcTape] = tupIndex;
		state->mergelast[srcTape] = tupIndex;
	}
	/* update per-tape and global availmem counts */
	spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
	state->mergeavailmem[srcTape] = state->availMem;
	state->availMem = priorAvail - spaceUsed;
}
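/*
 * Illustrative sketch (not part of the original code): mergeprereadone
 * charges tuple space against a per-tape budget by temporarily swapping that
 * budget into the global availMem counter and restoring the global counter
 * afterwards.  Reduced to its essentials, with hypothetical long variables
 * and a fixed "used" amount standing in for the READTUP charges, the
 * accounting pattern is:
 */
#ifdef TUPLESORT_ILLUSTRATION
static void
charge_against_per_tape_budget(long *globalAvail, long *tapeAvail, long used)
{
	long		priorAvail = *globalAvail;	/* save the global budget */

	*globalAvail = *tapeAvail;	/* work against this tape's share */
	*globalAvail -= used;		/* ... consume some of it ... */
	*tapeAvail = *globalAvail;	/* remember what this tape has left */
	*globalAvail = priorAvail - used;	/* global total drops by the same */
}
#endif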
/*
 * dumptuples - remove tuples from heap and write to tape
 *
 * This is used during initial-run building, but not during merging.
 *
 * When alltuples = false, dump only enough tuples to get under the
 * availMem limit (and leave at least one tuple in the heap in any case,
 * since puttuple assumes it always has a tuple to compare to).  We also
 * insist there be at least one free slot in the memtuples[] array.
 *
 * When alltuples = true, dump everything currently in memory.
 * (This case is only used at end of input data.)
 *
 * If we empty the heap, close out the current run and return (this should
 * only happen at end of input data).  If we see that the tuple run number
 * at the top of the heap has changed, start a new run.
 */
static void
dumptuples(Tuplesortstate *state, bool alltuples)
{
	while (alltuples ||
		   (LACKMEM(state) && state->memtupcount > 1) ||
		   state->memtupcount >= state->memtupsize)
	{
		/*
		 * Dump the heap's frontmost entry, and sift up to remove it from the
		 * heap.
		 */
		Assert(state->memtupcount > 0);
		WRITETUP(state, state->tp_tapenum[state->destTape],
				 &state->memtuples[0]);
		tuplesort_heap_siftup(state, true);

		/*
		 * If the heap is empty *or* top run number has changed, we've
		 * finished the current run.
		 */
		if (state->memtupcount == 0 ||
			state->currentRun != state->memtuples[0].tupindex)
		{
			markrunend(state, state->tp_tapenum[state->destTape]);
			state->currentRun++;
			state->tp_runs[state->destTape]++;
			state->tp_dummy[state->destTape]--; /* per Alg D step D2 */

#ifdef TRACE_SORT
			if (trace_sort)
				elog(LOG, "finished writing%s run %d to tape %d: %s",
					 (state->memtupcount == 0) ? " final" : "",
					 state->currentRun, state->destTape,
					 pg_rusage_show(&state->ru_start));
#endif

			/*
			 * Done if heap is empty, else prepare for new run.
			 */
			if (state->memtupcount == 0)
				break;
			Assert(state->currentRun == state->memtuples[0].tupindex);
			selectnewtape(state);
		}
	}
}
/*
 * tuplesort_rescan		- rewind and replay the scan
 */
void
tuplesort_rescan(Tuplesortstate *state)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

	Assert(state->randomAccess);

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			state->eof_reached = false;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;
		case TSS_SORTEDONTAPE:
			LogicalTapeRewind(state->tapeset,
							  state->result_tape,
							  false);
			state->eof_reached = false;
			state->markpos_block = 0L;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;
		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}

	MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_markpos	- saves current position in the merged sort file
 */
void
tuplesort_markpos(Tuplesortstate *state)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

	Assert(state->randomAccess);

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			state->markpos_offset = state->current;
			state->markpos_eof = state->eof_reached;
			break;
		case TSS_SORTEDONTAPE:
			LogicalTapeTell(state->tapeset,
							state->result_tape,
							&state->markpos_block,
							&state->markpos_offset);
			state->markpos_eof = state->eof_reached;
			break;
		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}

	MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_restorepos - restores current position in merged sort file to
 *						  last saved position
 */
void
tuplesort_restorepos(Tuplesortstate *state)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

	Assert(state->randomAccess);

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			state->current = state->markpos_offset;
			state->eof_reached = state->markpos_eof;
			break;
		case TSS_SORTEDONTAPE:
			if (!LogicalTapeSeek(state->tapeset,
								 state->result_tape,
								 state->markpos_block,
								 state->markpos_offset))
				elog(ERROR, "tuplesort_restorepos failed");
			state->eof_reached = state->markpos_eof;
			break;
		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}

	MemoryContextSwitchTo(oldcontext);
}
/*
 * tuplesort_explain - produce a line of information for EXPLAIN ANALYZE
 *
 * This can be called after tuplesort_performsort() finishes to obtain
 * printable summary information about how the sort was performed.
 *
 * The result is a palloc'd string.
 */
char *
tuplesort_explain(Tuplesortstate *state)
{
	char	   *result = (char *) palloc(100);
	long		spaceUsed;

	/*
	 * Note: it might seem we should print both memory and disk usage for a
	 * disk-based sort.  However, the current code doesn't track memory space
	 * accurately once we have begun to return tuples to the caller (since we
	 * don't account for pfree's the caller is expected to do), so we cannot
	 * rely on availMem in a disk sort.  This does not seem worth the overhead
	 * to fix.  Is it worth creating an API for the memory context code to
	 * tell us how much is actually used in sortcontext?
	 */
	if (state->tapeset)
		spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
	else
		spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			if (state->boundUsed)
				snprintf(result, 100,
						 "Sort Method: top-N heapsort  Memory: %ldkB",
						 spaceUsed);
			else
				snprintf(result, 100,
						 "Sort Method: quicksort  Memory: %ldkB",
						 spaceUsed);
			break;
		case TSS_SORTEDONTAPE:
			snprintf(result, 100,
					 "Sort Method: external sort  Disk: %ldkB",
					 spaceUsed);
			break;
		case TSS_FINALMERGE:
			snprintf(result, 100,
					 "Sort Method: external merge  Disk: %ldkB",
					 spaceUsed);
			break;
		default:
			snprintf(result, 100, "sort still in progress");
			break;
	}

	return result;
}
/*
 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
 *
 * Compare two SortTuples.  If checkIndex is true, use the tuple index
 * as the front of the sort key; otherwise, no.
 */

#define HEAPCOMPARE(tup1,tup2) \
	(checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
	 ((tup1)->tupindex) - ((tup2)->tupindex) : \
	 COMPARETUP(state, tup1, tup2))
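/*
 * Illustrative sketch (not part of the original code): with checkIndex true,
 * HEAPCOMPARE orders entries primarily by tupindex (the run number during
 * run building), so every tuple of run 0 sorts before every tuple of run 1
 * even when its key is larger.  The same two-level rule, written out as a
 * stand-alone comparator over a hypothetical struct of plain ints and
 * guarded out of compilation:
 */
#ifdef TUPLESORT_ILLUSTRATION
typedef struct
{
	int			runno;			/* stands in for SortTuple.tupindex */
	int			key;			/* stands in for the real sort key */
} illustration_entry;

static int
illustration_heapcompare(const illustration_entry *a, const illustration_entry *b)
{
	if (a->runno != b->runno)
		return a->runno - b->runno;		/* run number dominates the key */
	return (a->key > b->key) - (a->key < b->key);
}
#endif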
/*
 * Convert the existing unordered array of SortTuples to a bounded heap,
 * discarding all but the smallest "state->bound" tuples.
 *
 * When working with a bounded heap, we want to keep the largest entry
 * at the root (array entry zero), instead of the smallest as in the normal
 * sort case.  This allows us to discard the largest entry cheaply.
 * Therefore, we temporarily reverse the sort direction.
 *
 * We assume that all entries in a bounded heap will always have tupindex
 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
 * the direction of comparison for tupindexes.
 */
static void
make_bounded_heap(Tuplesortstate *state)
{
	int			tupcount = state->memtupcount;
	int			i;

	Assert(state->status == TSS_INITIAL);
	Assert(state->bounded);
	Assert(tupcount >= state->bound);

	/* Reverse sort direction so largest entry will be at root */
	REVERSEDIRECTION(state);

	state->memtupcount = 0;		/* make the heap empty */
	for (i = 0; i < tupcount; i++)
	{
		if (state->memtupcount >= state->bound &&
			COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
		{
			/* New tuple would just get thrown out, so skip it */
			free_sort_tuple(state, &state->memtuples[i]);
		}
		else
		{
			/* Insert next tuple into heap */
			/* Must copy source tuple to avoid possible overwrite */
			SortTuple	stup = state->memtuples[i];

			tuplesort_heap_insert(state, &stup, 0, false);

			/* If heap too full, discard largest entry */
			if (state->memtupcount > state->bound)
			{
				free_sort_tuple(state, &state->memtuples[0]);
				tuplesort_heap_siftup(state, false);
			}
		}
	}

	Assert(state->memtupcount == state->bound);
	state->status = TSS_BOUNDED;
}
/*
 * Convert the bounded heap to a properly-sorted array
 */
static void
sort_bounded_heap(Tuplesortstate *state)
{
	int			tupcount = state->memtupcount;

	Assert(state->status == TSS_BOUNDED);
	Assert(state->bounded);
	Assert(tupcount == state->bound);

	/*
	 * We can unheapify in place because each sift-up will remove the largest
	 * entry, which we can promptly store in the newly freed slot at the end.
	 * Once we're down to a single-entry heap, we're done.
	 */
	while (state->memtupcount > 1)
	{
		SortTuple	stup = state->memtuples[0];

		/* this sifts-up the next-largest entry and decreases memtupcount */
		tuplesort_heap_siftup(state, false);
		state->memtuples[state->memtupcount] = stup;
	}
	state->memtupcount = tupcount;

	/*
	 * Reverse sort direction back to the original state.  This is not
	 * actually necessary but seems like a good idea for tidiness.
	 */
	REVERSEDIRECTION(state);

	state->status = TSS_SORTEDINMEM;
	state->boundUsed = true;
}
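/*
 * Illustrative sketch (not part of the original code): the two routines
 * above implement a bounded top-N sort.  For example, with bound = 3 and
 * input 5, 1, 4, 2, 8, make_bounded_heap keeps only {4, 2, 1} in a reversed
 * (max-at-root) heap, and sort_bounded_heap then unheapifies in place to
 * yield 1, 2, 4.  A stand-alone version of the in-place unheapify step on a
 * plain int max-heap, with its own sift-down helper and hypothetical names:
 */
#ifdef TUPLESORT_ILLUSTRATION
static void
illustration_siftdown(int *heap, int n)
{
	int			val = heap[n];	/* entry that must be reinserted */
	int			i = 0;

	for (;;)
	{
		int			j = 2 * i + 1;

		if (j >= n)
			break;
		if (j + 1 < n && heap[j + 1] > heap[j])
			j++;				/* pick the larger child */
		if (val >= heap[j])
			break;
		heap[i] = heap[j];
		i = j;
	}
	heap[i] = val;
}

static void
illustration_unheapify(int *heap, int n)
{
	/* repeatedly move the maximum into the slot freed by shrinking the heap */
	while (n > 1)
	{
		int			top = heap[0];

		illustration_siftdown(heap, --n);	/* reinsert old last entry */
		heap[n] = top;			/* freed tail slot receives the maximum */
	}
}
#endif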
/*
 * Insert a new tuple into an empty or existing heap, maintaining the
 * heap invariant.  Caller is responsible for ensuring there's room.
 *
 * Note: we assume *tuple is a temporary variable that can be scribbled on.
 * For some callers, tuple actually points to a memtuples[] entry above the
 * end of the heap.  This is safe as long as it's not immediately adjacent
 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
 * is, it might get overwritten before being moved into the heap!
 */
static void
tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					  int tupleindex, bool checkIndex)
{
	SortTuple  *memtuples;
	int			j;

	/*
	 * Save the tupleindex --- see notes above about writing on *tuple.  It's
	 * a historical artifact that tupleindex is passed as a separate argument
	 * and not in *tuple, but it's notationally convenient so let's leave it
	 * that way.
	 */
	tuple->tupindex = tupleindex;

	memtuples = state->memtuples;
	Assert(state->memtupcount < state->memtupsize);

	/*
	 * Sift-up the new entry, per Knuth 5.2.3 exercise 16.  Note that Knuth is
	 * using 1-based array indexes, not 0-based.
	 */
	j = state->memtupcount++;
	while (j > 0)
	{
		int			i = (j - 1) >> 1;

		if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
			break;
		memtuples[j] = memtuples[i];
		j = i;
	}
	memtuples[j] = *tuple;
}
/*
 * The tuple at state->memtuples[0] has been removed from the heap.
 * Decrement memtupcount, and sift up to maintain the heap invariant.
 */
static void
tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
{
	SortTuple  *memtuples = state->memtuples;
	SortTuple  *tuple;
	int			i,
				n;

	if (--state->memtupcount <= 0)
		return;
	n = state->memtupcount;
	tuple = &memtuples[n];		/* tuple that must be reinserted */
	i = 0;						/* i is where the "hole" is */
	for (;;)
	{
		int			j = 2 * i + 1;

		if (j >= n)
			break;
		if (j + 1 < n &&
			HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
			j++;
		if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
			break;
		memtuples[i] = memtuples[j];
		i = j;
	}
	memtuples[i] = *tuple;
}
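/*
 * Illustrative note (not part of the original code): the index arithmetic in
 * the two heap routines above is the 0-based translation of Knuth's 1-based
 * formulas.  With 1-based slots the parent of j is j/2 and the children of i
 * are 2i and 2i+1; with 0-based slots those become (j - 1) / 2 (written
 * (j - 1) >> 1 above) and 2i + 1, 2i + 2.  As tiny stand-alone helpers:
 */
#ifdef TUPLESORT_ILLUSTRATION
static int
heap_parent_0based(int j)
{
	return (j - 1) >> 1;		/* matches tuplesort_heap_insert */
}

static int
heap_left_child_0based(int i)
{
	return 2 * i + 1;			/* matches tuplesort_heap_siftup */
}
#endif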
/*
 * Tape interface routines
 */

static unsigned int
getlen(Tuplesortstate *state, int tapenum, bool eofOK)
{
	unsigned int len;

	if (LogicalTapeRead(state->tapeset, tapenum, (void *) &len,
						sizeof(len)) != sizeof(len))
		elog(ERROR, "unexpected end of tape");
	if (len == 0 && !eofOK)
		elog(ERROR, "unexpected end of data");
	return len;
}

static void
markrunend(Tuplesortstate *state, int tapenum)
{
	unsigned int len = 0;

	LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
}
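/*
 * Illustrative sketch (not part of the original code): as getlen() and
 * markrunend() imply, a run on tape is a sequence of length-prefixed tuple
 * records terminated by a zero length word, i.e.
 *
 *		[len1][tuple1 body] [len2][tuple2 body] ... [0]
 *
 * where each stored length counts the length word itself plus the body (see
 * the writetup_* routines below).  A loop that consumes one run through
 * hypothetical callbacks would therefore look like:
 */
#ifdef TUPLESORT_ILLUSTRATION
static void
illustration_scan_one_run(unsigned int (*read_len) (void *arg),
						  void (*consume_body) (unsigned int bodylen, void *arg),
						  void *arg)
{
	for (;;)
	{
		unsigned int len = read_len(arg);	/* next length word */

		if (len == 0)
			break;				/* end-of-run marker written by markrunend */
		/* stored length includes the length word itself */
		consume_body(len - sizeof(unsigned int), arg);
	}
}
#endif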
/*
 * Set up for an external caller of ApplySortFunction.  This function
 * basically just exists to localize knowledge of the encoding of sk_flags
 * used in this module.
 */
void
SelectSortFunction(Oid sortOperator,
				   bool nulls_first,
				   Oid *sortFunction,
				   int *sortFlags)
{
	bool		reverse;

	if (!get_compare_function_for_ordering_op(sortOperator,
											  sortFunction, &reverse))
		elog(ERROR, "operator %u is not a valid ordering operator",
			 sortOperator);

	*sortFlags = reverse ? SK_BT_DESC : 0;
	if (nulls_first)
		*sortFlags |= SK_BT_NULLS_FIRST;
}
/*
 * Inline-able copy of FunctionCall2() to save some cycles in sorting.
 */
static inline Datum
myFunctionCall2(FmgrInfo *flinfo, Datum arg1, Datum arg2)
{
	FunctionCallInfoData fcinfo;
	Datum		result;

	InitFunctionCallInfoData(fcinfo, flinfo, 2, NULL, NULL);

	fcinfo.arg[0] = arg1;
	fcinfo.arg[1] = arg2;
	fcinfo.argnull[0] = false;
	fcinfo.argnull[1] = false;

	result = FunctionCallInvoke(&fcinfo);

	/* Check for null result, since caller is clearly not expecting one */
	if (fcinfo.isnull)
		elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid);

	return result;
}
/*
 * Apply a sort function (by now converted to fmgr lookup form)
 * and return a 3-way comparison result.  This takes care of handling
 * reverse-sort and NULLs-ordering properly.  We assume that DESC and
 * NULLS_FIRST options are encoded in sk_flags the same way btree does it.
 */
static inline int32
inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags,
						Datum datum1, bool isNull1,
						Datum datum2, bool isNull2)
{
	int32		compare;

	if (isNull1)
	{
		if (isNull2)
			compare = 0;		/* NULL "=" NULL */
		else if (sk_flags & SK_BT_NULLS_FIRST)
			compare = -1;		/* NULL "<" NOT_NULL */
		else
			compare = 1;		/* NULL ">" NOT_NULL */
	}
	else if (isNull2)
	{
		if (sk_flags & SK_BT_NULLS_FIRST)
			compare = 1;		/* NOT_NULL ">" NULL */
		else
			compare = -1;		/* NOT_NULL "<" NULL */
	}
	else
	{
		compare = DatumGetInt32(myFunctionCall2(sortFunction,
												datum1, datum2));

		if (sk_flags & SK_BT_DESC)
			compare = -compare;
	}

	return compare;
}

/*
 * Non-inline ApplySortFunction() --- this is needed only to conform to
 * C99's brain-dead notions about how to implement inline functions...
 */
int32
ApplySortFunction(FmgrInfo *sortFunction, int sortFlags,
				  Datum datum1, bool isNull1,
				  Datum datum2, bool isNull2)
{
	return inlineApplySortFunction(sortFunction, sortFlags,
								   datum1, isNull1, datum2, isNull2);
}
/*
 * Routines specialized for HeapTuple (actually MinimalTuple) case
 */

static int
comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
	ScanKey		scanKey = state->scanKeys;
	HeapTupleData ltup;
	HeapTupleData rtup;
	TupleDesc	tupDesc;
	int			nkey;
	int32		compare;

	/* Allow interrupting long sorts */
	CHECK_FOR_INTERRUPTS();

	/* Compare the leading sort key */
	compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
									  a->datum1, a->isnull1,
									  b->datum1, b->isnull1);
	if (compare != 0)
		return compare;

	/* Compare additional sort keys */
	ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
	ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
	rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
	rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
	tupDesc = state->tupDesc;
	scanKey++;
	for (nkey = 1; nkey < state->nKeys; nkey++, scanKey++)
	{
		AttrNumber	attno = scanKey->sk_attno;
		Datum		datum1,
					datum2;
		bool		isnull1,
					isnull2;

		datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
		datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);

		compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
										  datum1, isnull1,
										  datum2, isnull2);
		if (compare != 0)
			return compare;
	}

	return 0;
}
static void
copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	/*
	 * We expect the passed "tup" to be a TupleTableSlot, and form a
	 * MinimalTuple using the exported interface for that.
	 */
	TupleTableSlot *slot = (TupleTableSlot *) tup;
	MinimalTuple tuple;
	HeapTupleData htup;

	/* copy the tuple into sort storage */
	tuple = ExecCopySlotMinimalTuple(slot);
	stup->tuple = (void *) tuple;
	USEMEM(state, GetMemoryChunkSpace(tuple));
	/* set up first-column key value */
	htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
	htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
	stup->datum1 = heap_getattr(&htup,
								state->scanKeys[0].sk_attno,
								state->tupDesc,
								&stup->isnull1);
}
static void
writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	MinimalTuple tuple = (MinimalTuple) stup->tuple;

	/* the part of the MinimalTuple we'll write: */
	char	   *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
	unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;

	/* total on-disk footprint: */
	unsigned int tuplen = tupbodylen + sizeof(int);

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) tupbody, tupbodylen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

	FREEMEM(state, GetMemoryChunkSpace(tuple));
	heap_free_minimal_tuple(tuple);
}
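/*
 * Illustrative note (not part of the original code): each record written by
 * writetup_heap is laid out on tape as
 *
 *		[tuplen][tuple body][tuplen again, only if randomAccess]
 *
 * where tuplen counts the leading length word plus the body.  The optional
 * trailing copy is what lets a backward scan step to the previous record:
 * after reading the trailing word at the current position, the record starts
 * that many bytes (plus the trailing word itself) further back.  As a tiny
 * stand-alone helper on hypothetical byte offsets:
 */
#ifdef TUPLESORT_ILLUSTRATION
static long
illustration_previous_record_start(long pos_after_record,
								   unsigned int trailing_tuplen)
{
	/* trailing_tuplen covers the leading length word and the tuple body */
	return pos_after_record - (long) sizeof(unsigned int) - (long) trailing_tuplen;
}
#endif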
static void
readtup_heap(Tuplesortstate *state, SortTuple *stup,
			 int tapenum, unsigned int len)
{
	unsigned int tupbodylen = len - sizeof(int);
	unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
	MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
	char	   *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
	HeapTupleData htup;

	USEMEM(state, GetMemoryChunkSpace(tuple));
	/* read in the tuple proper */
	tuple->t_len = tuplen;
	if (LogicalTapeRead(state->tapeset, tapenum,
						(void *) tupbody,
						tupbodylen) != (size_t) tupbodylen)
		elog(ERROR, "unexpected end of data");
	if (state->randomAccess)	/* need trailing length word? */
		if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
							sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "unexpected end of data");
	stup->tuple = (void *) tuple;
	/* set up first-column key value */
	htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
	htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
	stup->datum1 = heap_getattr(&htup,
								state->scanKeys[0].sk_attno,
								state->tupDesc,
								&stup->isnull1);
}
static void
reversedirection_heap(Tuplesortstate *state)
{
	ScanKey		scanKey = state->scanKeys;
	int			nkey;

	for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
	{
		scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
	}
}
/*
 * Routines specialized for IndexTuple case
 *
 * The btree and hash cases require separate comparison functions, but the
 * IndexTuple representation is the same so the copy/write/read support
 * functions can be shared.
 */
static int
comparetup_index_btree(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state)
{
	/*
	 * This is similar to _bt_tuplecompare(), but we have already done the
	 * index_getattr calls for the first column, and we need to keep track of
	 * whether any null fields are present.  Also see the special treatment
	 * for equal keys at the end.
	 */
	ScanKey		scanKey = state->indexScanKey;
	IndexTuple	tuple1;
	IndexTuple	tuple2;
	int			keysz;
	TupleDesc	tupDes;
	bool		equal_hasnull = false;
	int			nkey;
	int32		compare;

	/* Allow interrupting long sorts */
	CHECK_FOR_INTERRUPTS();

	/* Compare the leading sort key */
	compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
									  a->datum1, a->isnull1,
									  b->datum1, b->isnull1);
	if (compare != 0)
		return compare;
	/* they are equal, so we only need to examine one null flag */
	if (a->isnull1)
		equal_hasnull = true;

	/* Compare additional sort keys */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;
	keysz = state->nKeys;
	tupDes = RelationGetDescr(state->indexRel);
	scanKey++;
	for (nkey = 2; nkey <= keysz; nkey++, scanKey++)
	{
		Datum		datum1,
					datum2;
		bool		isnull1,
					isnull2;

		datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
		datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);

		compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
										  datum1, isnull1,
										  datum2, isnull2);
		if (compare != 0)
			return compare;		/* done when we find unequal attributes */

		/* they are equal, so we only need to examine one null flag */
		if (isnull1)
			equal_hasnull = true;
	}
	/*
	 * If btree has asked us to enforce uniqueness, complain if two equal
	 * tuples are detected (unless there was at least one NULL field).
	 *
	 * It is sufficient to make the test here, because if two tuples are equal
	 * they *must* get compared at some stage of the sort --- otherwise the
	 * sort algorithm wouldn't have checked whether one must appear before the
	 * other.
	 *
	 * Some rather brain-dead implementations of qsort will sometimes call the
	 * comparison routine to compare a value to itself.  (At this writing only
	 * QNX 4 is known to do such silly things; we don't support QNX anymore,
	 * but perhaps the behavior still exists elsewhere.)  Don't raise a bogus
	 * error in that case.
	 */
	if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
		ereport(ERROR,
				(errcode(ERRCODE_UNIQUE_VIOLATION),
				 errmsg("could not create unique index \"%s\"",
						RelationGetRelationName(state->indexRel)),
				 errdetail("Table contains duplicated values.")));
	/*
	 * If key values are equal, we sort on ItemPointer.  This does not affect
	 * validity of the finished index, but it offers cheap insurance against
	 * performance problems with bad qsort implementations that have trouble
	 * with large numbers of equal keys.
	 */
	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}

	return 0;
}
static int
comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					  Tuplesortstate *state)
{
	uint32		hash1;
	uint32		hash2;
	IndexTuple	tuple1;
	IndexTuple	tuple2;

	/* Allow interrupting long sorts */
	CHECK_FOR_INTERRUPTS();

	/*
	 * Fetch hash keys and mask off bits we don't want to sort by.  We know
	 * that the first column of the index tuple is the hash key.
	 */
	Assert(!a->isnull1);
	hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
	Assert(!b->isnull1);
	hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;

	if (hash1 > hash2)
		return 1;
	else if (hash1 < hash2)
		return -1;

	/*
	 * If hash values are equal, we sort on ItemPointer.  This does not affect
	 * validity of the finished index, but it offers cheap insurance against
	 * performance problems with bad qsort implementations that have trouble
	 * with large numbers of equal keys.
	 */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;

	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}

	return 0;
}
static void
copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	IndexTuple	tuple = (IndexTuple) tup;
	unsigned int tuplen = IndexTupleSize(tuple);
	IndexTuple	newtuple;

	/* copy the tuple into sort storage */
	newtuple = (IndexTuple) palloc(tuplen);
	memcpy(newtuple, tuple, tuplen);
	USEMEM(state, GetMemoryChunkSpace(newtuple));
	stup->tuple = (void *) newtuple;
	/* set up first-column key value */
	stup->datum1 = index_getattr(newtuple,
								 1,
								 RelationGetDescr(state->indexRel),
								 &stup->isnull1);
}
static void
writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	IndexTuple	tuple = (IndexTuple) stup->tuple;
	unsigned int tuplen;

	tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) tuple, IndexTupleSize(tuple));
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

	FREEMEM(state, GetMemoryChunkSpace(tuple));
	pfree(tuple);
}
static void
readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);
	IndexTuple	tuple = (IndexTuple) palloc(tuplen);

	USEMEM(state, GetMemoryChunkSpace(tuple));
	if (LogicalTapeRead(state->tapeset, tapenum, (void *) tuple,
						tuplen) != tuplen)
		elog(ERROR, "unexpected end of data");
	if (state->randomAccess)	/* need trailing length word? */
		if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
							sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "unexpected end of data");
	stup->tuple = (void *) tuple;
	/* set up first-column key value */
	stup->datum1 = index_getattr(tuple,
								 1,
								 RelationGetDescr(state->indexRel),
								 &stup->isnull1);
}
static void
reversedirection_index_btree(Tuplesortstate *state)
{
	ScanKey		scanKey = state->indexScanKey;
	int			nkey;

	for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
	{
		scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
	}
}

static void
reversedirection_index_hash(Tuplesortstate *state)
{
	/* We don't support reversing direction in a hash index sort */
	elog(ERROR, "reversedirection_index_hash is not implemented");
}
/*
 * Routines specialized for DatumTuple case
 */

static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
	/* Allow interrupting long sorts */
	CHECK_FOR_INTERRUPTS();

	return inlineApplySortFunction(&state->sortOpFn, state->sortFnFlags,
								   a->datum1, a->isnull1,
								   b->datum1, b->isnull1);
}
static void
copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	/* Not currently needed */
	elog(ERROR, "copytup_datum() should not be called");
}
static void
writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	void	   *waddr;
	unsigned int tuplen;
	unsigned int writtenlen;

	if (stup->isnull1)
	{
		waddr = NULL;
		tuplen = 0;
	}
	else if (state->datumTypeByVal)
	{
		waddr = &stup->datum1;
		tuplen = sizeof(Datum);
	}
	else
	{
		waddr = DatumGetPointer(stup->datum1);
		tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen);
		Assert(tuplen != 0);
	}

	writtenlen = tuplen + sizeof(unsigned int);

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &writtenlen, sizeof(writtenlen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 waddr, tuplen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &writtenlen, sizeof(writtenlen));

	if (stup->tuple)
	{
		FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
		pfree(stup->tuple);
		stup->tuple = NULL;
	}
}
static void
readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);

	if (tuplen == 0)
	{
		/* it's NULL */
		stup->datum1 = (Datum) 0;
		stup->isnull1 = true;
		stup->tuple = NULL;
	}
	else if (state->datumTypeByVal)
	{
		Assert(tuplen == sizeof(Datum));
		if (LogicalTapeRead(state->tapeset, tapenum, (void *) &stup->datum1,
							tuplen) != tuplen)
			elog(ERROR, "unexpected end of data");
		stup->isnull1 = false;
		stup->tuple = NULL;
	}
	else
	{
		void	   *raddr = palloc(tuplen);

		if (LogicalTapeRead(state->tapeset, tapenum, raddr,
							tuplen) != tuplen)
			elog(ERROR, "unexpected end of data");
		stup->datum1 = PointerGetDatum(raddr);
		stup->isnull1 = false;
		stup->tuple = raddr;
		USEMEM(state, GetMemoryChunkSpace(raddr));
	}

	if (state->randomAccess)	/* need trailing length word? */
		if (LogicalTapeRead(state->tapeset, tapenum, (void *) &tuplen,
							sizeof(tuplen)) != sizeof(tuplen))
			elog(ERROR, "unexpected end of data");
}
static void
reversedirection_datum(Tuplesortstate *state)
{
	state->sortFnFlags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
}
/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */
static void
free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
{
	FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
	pfree(stup->tuple);
}