/*-------------------------------------------------------------------------
 *
 * hashjoin.h
 *	  internal structures for hash joins
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#ifndef HASHJOIN_H
#define HASHJOIN_H

#include "fmgr.h"
#include "storage/buffile.h"

/* ----------------------------------------------------------------
 *				hash-join hash table structures
 *
 * Each active hashjoin has a HashJoinTable control block, which is
 * palloc'd in the executor's per-query context.  All other storage needed
 * for the hashjoin is kept in private memory contexts, two for each hashjoin.
 * This makes it easy and fast to release the storage when we don't need it
 * anymore.  (Exception: data associated with the temp files lives in the
 * per-query context too, since we always call buffile.c in that context.)
 *
 * The hashtable contexts are made children of the per-query context, ensuring
 * that they will be discarded at end of statement even if the join is
 * aborted early by an error.  (Likewise, any temporary files we make will
 * be cleaned up by the virtual file manager in event of an error.)
 *
 * Storage that should live through the entire join is allocated from the
 * "hashCxt", while storage that is only wanted for the current batch is
 * allocated in the "batchCxt".  By resetting the batchCxt at the end of
 * each batch, we free all the per-batch storage reliably and without tedium.
 *
 * During the first scan of the inner relation, we get its tuples from the
 * executor.  If nbatch > 1 then tuples that don't belong in the first batch
 * get saved into inner-batch temp files.  The same applies to the first scan
 * of the outer relation, except that we write tuples to outer-batch temp
 * files.  After finishing the first scan, we do the following for each
 * remaining batch:
 *	1. Read tuples from the inner batch file, load into hash buckets.
 *	2. Read tuples from the outer batch file, match to hash buckets, and output.
 *
 * It is possible to increase nbatch on the fly if the in-memory hash table
 * gets too big.  The hash-value-to-batch computation is arranged so that this
 * can only cause a tuple to go into a later batch than previously thought,
 * never into an earlier batch.  When we increase nbatch, we rescan the hash
 * table and dump out any tuples that are now of a later batch to the correct
 * inner batch file.  Subsequently, while reading either inner or outer batch
 * files, we might find tuples that no longer belong to the current batch;
 * if so, we just dump them out to the correct batch file.
 * ----------------------------------------------------------------
 */

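/*
 * Illustrative sketch (not the actual executor code, which lives in
 * nodeHash.c) of the kind of hash-value-to-bucket/batch mapping described
 * above.  With nbuckets and nbatch both powers of 2 (fields of
 * HashJoinTableData, below), the bucket number can come from the low-order
 * bits of the hash value and the batch number from the bits just above them:
 *
 *		bucketno = hashvalue & (nbuckets - 1);
 *		if (nbatch > 1)
 *			batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
 *		else
 *			batchno = 0;
 *
 * Doubling nbatch merely widens the batch mask by one high-order bit, so a
 * tuple's batch number either stays the same or increases (e.g. batch 5 of 8
 * becomes batch 5 or 13 of 16); it can never move to an earlier batch, which
 * is the property relied on above.
 */
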
/* these are in nodes/execnodes.h: */
/* typedef struct HashJoinTupleData *HashJoinTuple; */
/* typedef struct HashJoinTableData *HashJoinTable; */

typedef struct HashJoinTupleData
{
	struct HashJoinTupleData *next;		/* link to next tuple in same bucket */
	uint32		hashvalue;		/* tuple's hash code */
	/* Tuple data, in MinimalTuple format, follows on a MAXALIGN boundary */
} HashJoinTupleData;

#define HJTUPLE_OVERHEAD		MAXALIGN(sizeof(HashJoinTupleData))
#define HJTUPLE_MINTUPLE(hjtup) \
	((MinimalTuple) ((char *) (hjtup) + HJTUPLE_OVERHEAD))

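/*
 * Illustrative sketch (hypothetical variable names, not the actual executor
 * code) of how these macros are used: each hash-join tuple is allocated as a
 * single chunk, a HashJoinTupleData header followed by the MinimalTuple body,
 * and linked into its bucket:
 *
 *		HashJoinTuple hashTuple;
 *		Size		hashTupleSize = HJTUPLE_OVERHEAD + mintuple->t_len;
 *
 *		hashTuple = (HashJoinTuple)
 *			MemoryContextAlloc(hashtable->batchCxt, hashTupleSize);
 *		hashTuple->hashvalue = hashvalue;
 *		memcpy(HJTUPLE_MINTUPLE(hashTuple), mintuple, mintuple->t_len);
 *		hashTuple->next = hashtable->buckets[bucketno];
 *		hashtable->buckets[bucketno] = hashTuple;
 */
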
/*
 * If the outer relation's distribution is sufficiently nonuniform, we attempt
 * to optimize the join by treating the hash values corresponding to the outer
 * relation's MCVs specially.  Inner relation tuples matching these hash
 * values go into the "skew" hashtable instead of the main hashtable, and
 * outer relation tuples with these hash values are matched against that
 * table instead of the main one.  Thus, tuples with these hash values are
 * effectively handled as part of the first batch and will never go to disk.
 * The skew hashtable is limited to SKEW_WORK_MEM_PERCENT of the total memory
 * allowed for the join; while building the hashtables, we decrease the number
 * of MCVs being specially treated if needed to stay under this limit.
 *
 * Note: you might wonder why we look at the outer relation stats for this,
 * rather than the inner.  One reason is that the outer relation is typically
 * bigger, so we get more I/O savings by optimizing for its most common values.
 * Also, for similarly-sized relations, the planner prefers to put the more
 * uniformly distributed relation on the inside, so we're more likely to find
 * interesting skew in the outer relation.
 */
typedef struct HashSkewBucket
{
	uint32		hashvalue;		/* common hash value */
	HashJoinTuple tuples;		/* linked list of inner-relation tuples */
} HashSkewBucket;

#define SKEW_BUCKET_OVERHEAD		MAXALIGN(sizeof(HashSkewBucket))
#define INVALID_SKEW_BUCKET_NO		(-1)
#define SKEW_WORK_MEM_PERCENT		2
#define SKEW_MIN_OUTER_FRACTION		0.01

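/*
 * Illustrative sketch of a skew-bucket probe, assuming (as the power-of-2
 * skewBucketLen suggests) that skewBucket is an open-addressing hash table
 * searched with linear probing; the real lookup logic lives in the executor's
 * hash code:
 *
 *		bucket = hashvalue & (hashtable->skewBucketLen - 1);
 *		while (hashtable->skewBucket[bucket] != NULL &&
 *			   hashtable->skewBucket[bucket]->hashvalue != hashvalue)
 *			bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
 *		if (hashtable->skewBucket[bucket] == NULL)
 *			bucket = INVALID_SKEW_BUCKET_NO;
 *
 * A valid bucket number means the tuple is handled via
 * skewBucket[bucket]->tuples as part of the first batch;
 * INVALID_SKEW_BUCKET_NO sends it through the normal bucket/batch path.
 */
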
typedef struct HashJoinTableData
{
	int			nbuckets;		/* # buckets in the in-memory hash table */
	int			log2_nbuckets;	/* its log2 (nbuckets must be a power of 2) */

	/* buckets[i] is head of list of tuples in i'th in-memory bucket */
	struct HashJoinTupleData **buckets;
	/* buckets array is per-batch storage, as are all the tuples */

	bool		skewEnabled;	/* are we using skew optimization? */
	HashSkewBucket **skewBucket;	/* hashtable of skew buckets */
	int			skewBucketLen;	/* size of skewBucket array (a power of 2!) */
	int			nSkewBuckets;	/* number of active skew buckets */
	int		   *skewBucketNums; /* array indexes of active skew buckets */

	int			nbatch;			/* number of batches */
	int			curbatch;		/* current batch #; 0 during 1st pass */

	int			nbatch_original;	/* nbatch when we started inner scan */
	int			nbatch_outstart;	/* nbatch when we started outer scan */

	bool		growEnabled;	/* flag to shut off nbatch increases */

	double		totalTuples;	/* # tuples obtained from inner plan */

	/*
	 * These arrays are allocated for the life of the hash join, but only if
	 * nbatch > 1.  A file is opened only when we first write a tuple into it
	 * (otherwise its pointer remains NULL).  Note that the zero'th array
	 * elements never get used, since we will process rather than dump out
	 * any tuples of batch zero.
	 */
	BufFile   **innerBatchFile; /* buffered virtual temp file per batch */
	BufFile   **outerBatchFile; /* buffered virtual temp file per batch */

	/*
	 * Info about the datatype-specific hash functions for the datatypes being
	 * hashed.  These are arrays of the same length as the number of hash join
	 * clauses (hash keys).
	 */
	FmgrInfo   *outer_hashfunctions;	/* lookup data for hash functions */
	FmgrInfo   *inner_hashfunctions;	/* lookup data for hash functions */
	bool	   *hashStrict;		/* is each hash join operator strict? */

	Size		spaceUsed;		/* memory space currently used by tuples */
	Size		spaceAllowed;	/* upper limit for space used */
	Size		spaceUsedSkew;	/* skew hash table's current space usage */
	Size		spaceAllowedSkew;	/* upper limit for skew hashtable */

	MemoryContext hashCxt;		/* context for whole-hash-join storage */
	MemoryContext batchCxt;		/* context for this-batch-only storage */
} HashJoinTableData;

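/*
 * Illustrative sketch (not the actual executor code) of what the
 * hashCxt/batchCxt split buys us at a batch boundary: a single context reset
 * releases all of the previous batch's tuples plus the buckets array, which
 * is then rebuilt empty in the same context:
 *
 *		MemoryContext oldcxt;
 *
 *		MemoryContextReset(hashtable->batchCxt);
 *		oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
 *		hashtable->buckets = (HashJoinTuple *)
 *			palloc0(hashtable->nbuckets * sizeof(HashJoinTuple));
 *		hashtable->spaceUsed = 0;
 *		MemoryContextSwitchTo(oldcxt);
 *
 * Whole-join state such as the batch file arrays and the hash-function
 * lookup data is untouched, since it lives in hashCxt (and the BufFiles'
 * own state lives in the per-query context, per the comments above).
 */
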
#endif   /* HASHJOIN_H */