/*-------------------------------------------------------------------------
 *
 * nbtree.h
 *	  header file for postgres btree access method implementation.
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/nbtree.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef NBTREE_H
#define NBTREE_H

#include "access/amapi.h"
#include "access/itup.h"
#include "access/sdir.h"
#include "access/xlogreader.h"
#include "catalog/pg_am_d.h"
#include "catalog/pg_index.h"
#include "lib/stringinfo.h"
#include "storage/bufmgr.h"
#include "storage/shm_toc.h"

/* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
typedef uint16 BTCycleId;
/*
 *	BTPageOpaqueData -- At the end of every page, we store a pointer
 *	to both siblings in the tree.  This is used to do forward/backward
 *	index scans.  The next-page link is also critical for recovery when
 *	a search has navigated to the wrong page due to concurrent page splits
 *	or deletions; see src/backend/access/nbtree/README for more info.
 *
 *	In addition, we store the page's btree level (counting upwards from
 *	zero at a leaf page) as well as some flag bits indicating the page type
 *	and status.  If the page is deleted, we replace the level with the
 *	next-transaction-ID value indicating when it is safe to reclaim the page.
 *
 *	We also store a "vacuum cycle ID".  When a page is split while VACUUM is
 *	processing the index, a nonzero value associated with the VACUUM run is
 *	stored into both halves of the split page.  (If VACUUM is not running,
 *	both pages receive zero cycleids.)	This allows VACUUM to detect whether
 *	a page was split since it started, with a small probability of false match
 *	if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
 *	ago.  Also, during a split, the BTP_SPLIT_END flag is cleared in the left
 *	(original) page, and set in the right page, but only if the next page
 *	to its right has a different cycleid.
 *
 *	NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
 *	instead.
 */
typedef struct BTPageOpaqueData
{
	BlockNumber btpo_prev;		/* left sibling, or P_NONE if leftmost */
	BlockNumber btpo_next;		/* right sibling, or P_NONE if rightmost */
	union
	{
		uint32		level;		/* tree level --- zero for leaf pages */
		TransactionId xact;		/* next transaction ID, if deleted */
	}			btpo;
	uint16		btpo_flags;		/* flag bits, see below */
	BTCycleId	btpo_cycleid;	/* vacuum cycle ID of latest split */
} BTPageOpaqueData;

typedef BTPageOpaqueData *BTPageOpaque;
/* Bits defined in btpo_flags */
#define BTP_LEAF		(1 << 0)	/* leaf page, i.e. not internal page */
#define BTP_ROOT		(1 << 1)	/* root page (has no parent) */
#define BTP_DELETED		(1 << 2)	/* page has been deleted from tree */
#define BTP_META		(1 << 3)	/* meta-page */
#define BTP_HALF_DEAD	(1 << 4)	/* empty, but still in tree */
#define BTP_SPLIT_END	(1 << 5)	/* rightmost page of split group */
#define BTP_HAS_GARBAGE (1 << 6)	/* page has LP_DEAD tuples (deprecated) */
#define BTP_INCOMPLETE_SPLIT (1 << 7)	/* right sibling's downlink is missing */
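
/*
 * Illustrative sketch (an assumption about typical usage, not a verbatim
 * excerpt from this header): btree code reaches the opaque data through the
 * page's special space and then tests btpo_flags.  "page" is a hypothetical
 * local variable holding a btree page.
 *
 *		BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
 *
 *		if ((opaque->btpo_flags & BTP_LEAF) != 0)
 *			... (equivalently, P_ISLEAF(opaque), defined below)
 */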
/*
 * The max allowed value of a cycle ID is a bit less than 64K.  This is
 * for convenience of pg_filedump and similar utilities: we want to use
 * the last 2 bytes of special space as an index type indicator, and
 * restricting cycle ID lets btree use that space for vacuum cycle IDs
 * while still allowing index type to be identified.
 */
#define MAX_BT_CYCLE_ID		0xFF7F
/*
 * The Meta page is always the first page in the btree index.
 * Its primary purpose is to point to the location of the btree root page.
 * We also point to the "fast" root, which is the current effective root;
 * see README for discussion.
 */
typedef struct BTMetaPageData
{
	uint32		btm_magic;		/* should contain BTREE_MAGIC */
	uint32		btm_version;	/* nbtree version (always <= BTREE_VERSION) */
	BlockNumber btm_root;		/* current root location */
	uint32		btm_level;		/* tree level of the root page */
	BlockNumber btm_fastroot;	/* current "fast" root location */
	uint32		btm_fastlevel;	/* tree level of the "fast" root page */
	/* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */
	TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among all deleted
										 * pages */
	float8		btm_last_cleanup_num_heap_tuples;	/* number of heap tuples
													 * during last cleanup */
	bool		btm_allequalimage;	/* are all columns "equalimage"? */
} BTMetaPageData;

#define BTPageGetMeta(p) \
	((BTMetaPageData *) PageGetContents(p))
/*
 *	The current Btree version is 4.  That's what you'll get when you create
 *	a new index.
 *
 *	Btree version 3 was used in PostgreSQL v11.  It is mostly the same as
 *	version 4, but heap TIDs were not part of the keyspace.  Index tuples
 *	with duplicate keys could be stored in any order.  We continue to
 *	support reading and writing Btree versions 2 and 3, so that they don't
 *	need to be immediately re-indexed at pg_upgrade.  In order to get the
 *	new heapkeyspace semantics, however, a REINDEX is needed.
 *
 *	Deduplication is safe to use when the btm_allequalimage field is set to
 *	true.  It's safe to read the btm_allequalimage field on version 3, but
 *	only version 4 indexes make use of deduplication.  Even version 4
 *	indexes created on PostgreSQL v12 will need a REINDEX to make use of
 *	deduplication, though, since there is no other way to set
 *	btm_allequalimage to true (pg_upgrade hasn't been taught to set the
 *	metapage field).
 *
 *	Btree version 2 is mostly the same as version 3.  There are two new
 *	fields in the metapage that were introduced in version 3.  A version 2
 *	metapage will be automatically upgraded to version 3 on the first
 *	insert to it.  INCLUDE indexes cannot use version 2.
 */
#define BTREE_METAPAGE	0		/* first page is meta */
#define BTREE_MAGIC		0x053162	/* magic number in metapage */
#define BTREE_VERSION	4		/* current version number */
#define BTREE_MIN_VERSION	2	/* minimum supported version */
#define BTREE_NOVAC_VERSION	3	/* version with all meta fields set */
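
/*
 * Illustrative sketch (an assumption about typical usage, not a verbatim
 * excerpt): code that reads the metapage ("metapg", the page at block
 * BTREE_METAPAGE) is expected to validate it along these lines before
 * trusting its contents.
 *
 *		BTMetaPageData *metad = BTPageGetMeta(metapg);
 *
 *		if (metad->btm_magic != BTREE_MAGIC)
 *			elog(ERROR, "index is not a btree");
 *		if (metad->btm_version < BTREE_MIN_VERSION ||
 *			metad->btm_version > BTREE_VERSION)
 *			elog(ERROR, "unsupported btree version %u", metad->btm_version);
 */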
/*
 * Maximum size of a btree index entry, including its tuple header.
 *
 * We actually need to be able to fit three items on every page,
 * so restrict any one item to 1/3 the per-page available space.
 *
 * There are rare cases where _bt_truncate() will need to enlarge
 * a heap index tuple to make space for a tiebreaker heap TID
 * attribute, which we account for here.
 */
#define BTMaxItemSize(page) \
	MAXALIGN_DOWN((PageGetPageSize(page) - \
				   MAXALIGN(SizeOfPageHeaderData + \
							3*sizeof(ItemIdData) + \
							3*sizeof(ItemPointerData)) - \
				   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
#define BTMaxItemSizeNoHeapTid(page) \
	MAXALIGN_DOWN((PageGetPageSize(page) - \
				   MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
				   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
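
/*
 * Illustrative sketch (hypothetical variables, not a verbatim excerpt):
 * insertion-side code is expected to reject oversized tuples with a test of
 * this shape; _bt_check_third_page() performs the authoritative check.
 *
 *		if (itemsz > BTMaxItemSize(page))
 *			ereport(ERROR,
 *					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 *					 errmsg("index row size exceeds btree maximum")));
 */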
/*
 * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs
 * that may be stored on a btree leaf page.  It is used to size the
 * per-page temporary buffers used by index scans.
 *
 * Note: we don't bother considering per-tuple overheads here to keep
 * things simple (value is based on how many elements a single array of
 * heap TIDs must have to fill the space between the page header and
 * special area).  The value is slightly higher (i.e. more conservative)
 * than necessary as a result, which is considered acceptable.
 */
#define MaxTIDsPerBTreePage \
	(int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \
		   sizeof(ItemPointerData))
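
/*
 * Worked example, assuming the default 8192-byte BLCKSZ, a 24-byte page
 * header, a 16-byte BTPageOpaqueData, and 6-byte ItemPointerData:
 *
 *		(8192 - 24 - 16) / 6 = 1358 TIDs per leaf page, at most
 */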
/*
 * The leaf-page fillfactor defaults to 90% but is user-adjustable.
 * For pages above the leaf level, we use a fixed 70% fillfactor.
 * The fillfactor is applied during index build and when splitting
 * a rightmost page; when splitting non-rightmost pages we try to
 * divide the data equally.  When splitting a page that's entirely
 * filled with a single value (duplicates), the effective leaf-page
 * fillfactor is 96%, regardless of whether the page is a rightmost
 * page.
 */
#define BTREE_MIN_FILLFACTOR		10
#define BTREE_DEFAULT_FILLFACTOR	90
#define BTREE_NONLEAF_FILLFACTOR	70
#define BTREE_SINGLEVAL_FILLFACTOR	96
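
/*
 * Worked example, assuming the default 8192-byte BLCKSZ and the default
 * leaf fillfactor of 90: BTGetTargetPageFreeSpace() (defined below) leaves
 * 8192 * (100 - 90) / 100 = 819 bytes free when splitting a rightmost page.
 */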
/*
 * In general, the btree code tries to localize its knowledge about
 * page layout to a couple of routines.  However, we need a special
 * value to indicate "no page number" in those places where we expect
 * page numbers.  We can use zero for this because we never need to
 * make a pointer to the metadata page.
 */
#define P_NONE			0
/*
 * Macros to test whether a page is leftmost or rightmost on its tree level,
 * as well as other state info kept in the opaque data.
 */
#define P_LEFTMOST(opaque)		((opaque)->btpo_prev == P_NONE)
#define P_RIGHTMOST(opaque)		((opaque)->btpo_next == P_NONE)
#define P_ISLEAF(opaque)		(((opaque)->btpo_flags & BTP_LEAF) != 0)
#define P_ISROOT(opaque)		(((opaque)->btpo_flags & BTP_ROOT) != 0)
#define P_ISDELETED(opaque)		(((opaque)->btpo_flags & BTP_DELETED) != 0)
#define P_ISMETA(opaque)		(((opaque)->btpo_flags & BTP_META) != 0)
#define P_ISHALFDEAD(opaque)	(((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
#define P_IGNORE(opaque)		(((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
#define P_HAS_GARBAGE(opaque)	(((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
#define P_INCOMPLETE_SPLIT(opaque)	(((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
/*
 *	Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
 *	page.  The high key is not a tuple that is used to visit the heap.  It is
 *	a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
 *	The high key on a page is required to be greater than or equal to any
 *	other key that appears on the page.  If we find ourselves trying to
 *	insert a key that is strictly > high key, we know we need to move right
 *	(this should only happen if the page was split since we examined the
 *	parent page).
 *
 *	Our insertion algorithm guarantees that we can use the initial least key
 *	on our right sibling as the high key.  Once a page is created, its high
 *	key changes only if the page is split.
 *
 *	On a non-rightmost page, the high key lives in item 1 and data items
 *	start in item 2.  Rightmost pages have no high key, so we store data
 *	items beginning in item 1.
 */
#define P_HIKEY				((OffsetNumber) 1)
#define P_FIRSTKEY			((OffsetNumber) 2)
#define P_FIRSTDATAKEY(opaque)	(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
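
/*
 * Illustrative sketch (hypothetical "page" and "opaque" variables, not a
 * verbatim excerpt): a pass over all data items on a page, skipping the
 * high key on non-rightmost pages.
 *
 *		OffsetNumber offnum;
 *		OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
 *
 *		for (offnum = P_FIRSTDATAKEY(opaque);
 *			 offnum <= maxoff;
 *			 offnum = OffsetNumberNext(offnum))
 *		{
 *			ItemId		itemid = PageGetItemId(page, offnum);
 *			IndexTuple	itup = (IndexTuple) PageGetItem(page, itemid);
 *
 *			... process itup ...
 *		}
 */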
/*
 * Notes on B-Tree tuple format, and key and non-key attributes:
 *
 * INCLUDE B-Tree indexes have non-key attributes.  These are extra
 * attributes that may be returned by index-only scans, but do not influence
 * the order of items in the index (formally, non-key attributes are not
 * considered to be part of the key space).  Non-key attributes are only
 * present in leaf index tuples whose item pointers actually point to heap
 * tuples (non-pivot tuples).  _bt_check_natts() enforces the rules
 * described here.
 *
 * Non-pivot tuple format (plain/non-posting variant):
 *
 *  t_tid | t_info | key values | INCLUDE columns, if any
 *
 * t_tid points to the heap TID, which is a tiebreaker key column as of
 * BTREE_VERSION 4.
 *
 * Non-pivot tuples complement pivot tuples, which only have key columns.
 * The sole purpose of pivot tuples is to represent how the key space is
 * separated.  In general, any B-Tree index that has more than one level
 * (i.e. any index that does not just consist of a metapage and a single
 * leaf root page) must have some number of pivot tuples, since pivot
 * tuples are used for traversing the tree.  Suffix truncation can omit
 * trailing key columns when a new pivot is formed, which makes minus
 * infinity their logical value.  Since BTREE_VERSION 4 indexes treat heap
 * TID as a trailing key column that ensures that all index tuples are
 * physically unique, it is necessary to represent heap TID as a trailing
 * key column in pivot tuples, though very often this can be truncated
 * away, just like any other key column.  (Actually, the heap TID is
 * omitted rather than truncated, since its representation is different to
 * the non-pivot representation.)
 *
 * Pivot tuple format:
 *
 *  t_tid | t_info | key values | [heap TID]
 *
 * We store the number of columns present inside pivot tuples by abusing
 * their t_tid offset field, since pivot tuples never need to store a real
 * offset (pivot tuples generally store a downlink in t_tid, though).  The
 * offset field only stores the number of columns/attributes when the
 * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
 * TID column sometimes stored in pivot tuples -- that's represented by
 * the presence of BT_PIVOT_HEAP_TID_ATTR.  The INDEX_ALT_TID_MASK bit in
 * t_info is always set on BTREE_VERSION 4 pivot tuples, since
 * BTreeTupleIsPivot() must work reliably on heapkeyspace versions.
 *
 * In version 2 or version 3 (!heapkeyspace) indexes, INDEX_ALT_TID_MASK
 * might not be set in pivot tuples.  BTreeTupleIsPivot() won't work
 * reliably as a result.  The number of columns stored is implicitly the
 * same as the number of columns in the index, just like any non-pivot
 * tuple.  (The number of columns stored should not vary, since suffix
 * truncation of key columns is unsafe within any !heapkeyspace index.)
 *
 * The 12 least significant bits from t_tid's offset number are used to
 * represent the number of key columns within a pivot tuple.  This leaves 4
 * status bits (BT_STATUS_OFFSET_MASK bits), which are shared by all tuples
 * that have the INDEX_ALT_TID_MASK bit set (set in t_info) to store basic
 * tuple metadata.  BTreeTupleIsPivot() and BTreeTupleIsPosting() use the
 * BT_STATUS_OFFSET_MASK bits.
 *
 * Sometimes non-pivot tuples also use a representation that repurposes
 * t_tid to store metadata rather than a TID.  PostgreSQL v13 introduced a
 * new non-pivot tuple format to support deduplication: posting list
 * tuples.  Deduplication merges together multiple equal non-pivot tuples
 * into a logically equivalent, space efficient representation.  A posting
 * list is an array of ItemPointerData elements.  Non-pivot tuples are
 * merged together to form posting list tuples lazily, at the point where
 * we'd otherwise have to split a leaf page.
 *
 * Posting tuple format (alternative non-pivot tuple representation):
 *
 *  t_tid | t_info | key values | posting list (TID array)
 *
 * Posting list tuples are recognized as such by having the
 * INDEX_ALT_TID_MASK status bit set in t_info and the BT_IS_POSTING status
 * bit set in t_tid's offset number.  These flags redefine the content of
 * the posting tuple's t_tid to store the location of the posting list
 * (instead of a block number), as well as the total number of heap TIDs
 * present in the tuple (instead of a real offset number).
 *
 * The 12 least significant bits from t_tid's offset number are used to
 * represent the number of heap TIDs present in the tuple, leaving 4 status
 * bits (the BT_STATUS_OFFSET_MASK bits).  Like any non-pivot tuple, the
 * number of columns stored is always implicitly the total number in the
 * index (in practice there can never be non-key columns stored, since
 * deduplication is not supported with INCLUDE indexes).
 */
#define INDEX_ALT_TID_MASK	INDEX_AM_RESERVED_BIT

/* Item pointer offset bit masks */
#define BT_OFFSET_MASK				0x0FFF
#define BT_STATUS_OFFSET_MASK		0xF000
/* BT_STATUS_OFFSET_MASK status bits */
#define BT_PIVOT_HEAP_TID_ATTR		0x1000
#define BT_IS_POSTING				0x2000
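
/*
 * Illustrative sketch (hypothetical "itup", not a verbatim excerpt) of how
 * the abused offset number decomposes once INDEX_ALT_TID_MASK is set in
 * t_info:
 *
 *		OffsetNumber off = ItemPointerGetOffsetNumberNoCheck(&itup->t_tid);
 *
 *		off & BT_OFFSET_MASK		 number of key columns (pivot tuples),
 *									 or number of heap TIDs (posting tuples)
 *		off & BT_STATUS_OFFSET_MASK	 status bits: BT_PIVOT_HEAP_TID_ATTR
 *									 and/or BT_IS_POSTING
 */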
/*
 * Note: BTreeTupleIsPivot() can have false negatives (but not false
 * positives) when used with !heapkeyspace indexes
 */
static inline bool
BTreeTupleIsPivot(IndexTuple itup)
{
	if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
		return false;
	/* absence of BT_IS_POSTING in offset number indicates pivot tuple */
	if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) != 0)
		return false;

	return true;
}

static inline bool
BTreeTupleIsPosting(IndexTuple itup)
{
	if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
		return false;
	/* presence of BT_IS_POSTING in offset number indicates posting tuple */
	if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) == 0)
		return false;

	return true;
}

static inline void
BTreeTupleSetPosting(IndexTuple itup, uint16 nhtids, int postingoffset)
{
	Assert((nhtids & BT_STATUS_OFFSET_MASK) == 0);
	Assert((size_t) postingoffset == MAXALIGN(postingoffset));
	Assert(postingoffset < INDEX_SIZE_MASK);
	Assert(!BTreeTupleIsPivot(itup));

	itup->t_info |= INDEX_ALT_TID_MASK;
	ItemPointerSetOffsetNumber(&itup->t_tid, (nhtids | BT_IS_POSTING));
	ItemPointerSetBlockNumber(&itup->t_tid, postingoffset);
}

static inline uint16
BTreeTupleGetNPosting(IndexTuple posting)
{
	OffsetNumber existing;

	Assert(BTreeTupleIsPosting(posting));

	existing = ItemPointerGetOffsetNumberNoCheck(&posting->t_tid);
	return (existing & BT_OFFSET_MASK);
}

static inline uint32
BTreeTupleGetPostingOffset(IndexTuple posting)
{
	Assert(BTreeTupleIsPosting(posting));

	return ItemPointerGetBlockNumberNoCheck(&posting->t_tid);
}

static inline ItemPointer
BTreeTupleGetPosting(IndexTuple posting)
{
	return (ItemPointer) ((char *) posting +
						  BTreeTupleGetPostingOffset(posting));
}

static inline ItemPointer
BTreeTupleGetPostingN(IndexTuple posting, int n)
{
	return BTreeTupleGetPosting(posting) + n;
}
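
/*
 * Illustrative sketch (hypothetical "posting", not a verbatim excerpt):
 * visiting every heap TID stored in a posting list tuple.
 *
 *		uint16		nhtids = BTreeTupleGetNPosting(posting);
 *
 *		for (int i = 0; i < nhtids; i++)
 *		{
 *			ItemPointer htid = BTreeTupleGetPostingN(posting, i);
 *
 *			... process htid ...
 *		}
 */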
/*
 * Get/set downlink block number in pivot tuple.
 *
 * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
 * !heapkeyspace indexes would exhibit false positive assertion failures.
 */
static inline BlockNumber
BTreeTupleGetDownLink(IndexTuple pivot)
{
	return ItemPointerGetBlockNumberNoCheck(&pivot->t_tid);
}

static inline void
BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
{
	ItemPointerSetBlockNumber(&pivot->t_tid, blkno);
}
/*
 * Get number of attributes within tuple.
 *
 * Note that this does not include an implicit tiebreaker heap TID
 * attribute, if any.  Note also that the number of key attributes must be
 * explicitly represented in all heapkeyspace pivot tuples.
 *
 * Note: This is defined as a macro rather than an inline function to
 * avoid including rel.h.
 */
#define BTreeTupleGetNAtts(itup, rel)	\
	( \
		(BTreeTupleIsPivot(itup)) ? \
		( \
			ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_OFFSET_MASK \
		) \
		: \
		IndexRelationGetNumberOfAttributes(rel) \
	)
/*
 * Set number of key attributes in tuple.
 *
 * The heap TID tiebreaker attribute bit may also be set here, indicating that
 * a heap TID value will be stored at the end of the tuple (i.e. using the
 * special pivot tuple representation).
 */
static inline void
BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
{
	Assert(nkeyatts <= INDEX_MAX_KEYS);
	Assert((nkeyatts & BT_STATUS_OFFSET_MASK) == 0);
	Assert(!heaptid || nkeyatts > 0);
	Assert(!BTreeTupleIsPivot(itup) || nkeyatts == 0);

	itup->t_info |= INDEX_ALT_TID_MASK;

	if (heaptid)
		nkeyatts |= BT_PIVOT_HEAP_TID_ATTR;

	/* BT_IS_POSTING bit is deliberately unset here */
	ItemPointerSetOffsetNumber(&itup->t_tid, nkeyatts);
	Assert(BTreeTupleIsPivot(itup));
}
/*
 * Get/set leaf page's "top parent" link from its high key.  Used during page
 * deletion.
 *
 * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
 * !heapkeyspace indexes would exhibit false positive assertion failures.
 */
static inline BlockNumber
BTreeTupleGetTopParent(IndexTuple leafhikey)
{
	return ItemPointerGetBlockNumberNoCheck(&leafhikey->t_tid);
}

static inline void
BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
{
	ItemPointerSetBlockNumber(&leafhikey->t_tid, blkno);
	BTreeTupleSetNAtts(leafhikey, 0, false);
}
/*
 * Get tiebreaker heap TID attribute, if any.
 *
 * This returns the first/lowest heap TID in the case of a posting list tuple.
 */
static inline ItemPointer
BTreeTupleGetHeapTID(IndexTuple itup)
{
	if (BTreeTupleIsPivot(itup))
	{
		/* Pivot tuple heap TID representation? */
		if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
			 BT_PIVOT_HEAP_TID_ATTR) != 0)
			return (ItemPointer) ((char *) itup + IndexTupleSize(itup) -
								  sizeof(ItemPointerData));

		/* Heap TID attribute was truncated */
		return NULL;
	}
	else if (BTreeTupleIsPosting(itup))
		return BTreeTupleGetPosting(itup);

	return &itup->t_tid;
}
/*
 * Get maximum heap TID attribute, which could be the only TID in the case of
 * a non-pivot tuple that does not have a posting list.
 *
 * Works with non-pivot tuples only.
 */
static inline ItemPointer
BTreeTupleGetMaxHeapTID(IndexTuple itup)
{
	Assert(!BTreeTupleIsPivot(itup));

	if (BTreeTupleIsPosting(itup))
	{
		uint16		nposting = BTreeTupleGetNPosting(itup);

		return BTreeTupleGetPostingN(itup, nposting - 1);
	}

	return &itup->t_tid;
}
/*
 *	Operator strategy numbers for B-tree have been moved to access/stratnum.h,
 *	because many places need to use them in ScanKeyInit() calls.
 *
 *	The strategy numbers are chosen so that we can commute them by
 *	subtraction, thus:
 */
#define BTCommuteStrategyNumber(strat)	(BTMaxStrategyNumber + 1 - (strat))
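
/*
 * For example, with BTMaxStrategyNumber == 5,
 * BTCommuteStrategyNumber(BTLessStrategyNumber) == BTGreaterStrategyNumber:
 * commuting "a < b" yields the equivalent "b > a".
 */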
/*
 *	When a new operator class is declared, we require that the user
 *	supply us with an amproc procedure (BTORDER_PROC) for determining
 *	whether, for two keys a and b, a < b, a = b, or a > b.  This routine
 *	must return < 0, 0, > 0, respectively, in these three cases.
 *
 *	To facilitate accelerated sorting, an operator class may choose to
 *	offer a second procedure (BTSORTSUPPORT_PROC).  For full details, see
 *	src/include/utils/sortsupport.h.
 *
 *	To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
 *	an operator class may choose to offer a third amproc procedure
 *	(BTINRANGE_PROC), independently of whether it offers sortsupport.
 *	For full details, see doc/src/sgml/btree.sgml.
 *
 *	To facilitate B-Tree deduplication, an operator class may choose to
 *	offer a fourth amproc procedure (BTEQUALIMAGE_PROC).  For full details,
 *	see doc/src/sgml/btree.sgml.
 */
#define BTORDER_PROC		1
#define BTSORTSUPPORT_PROC	2
#define BTINRANGE_PROC		3
#define BTEQUALIMAGE_PROC	4
#define BTOPTIONS_PROC		5
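
/*
 * Illustrative sketch (hypothetical "rel", "attno", "collation", "a", "b";
 * not a verbatim excerpt): the comparison support function is looked up and
 * invoked roughly as follows.
 *
 *		FmgrInfo   *procinfo = index_getprocinfo(rel, attno, BTORDER_PROC);
 *		int32		cmp = DatumGetInt32(FunctionCall2Coll(procinfo,
 *														  collation, a, b));
 *
 *		cmp < 0, cmp == 0 and cmp > 0 mean a < b, a = b and a > b.
 */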
/*
 *	We need to be able to tell the difference between read and write
 *	requests for pages, in order to do locking correctly.
 */
#define BT_READ			BUFFER_LOCK_SHARE
#define BT_WRITE		BUFFER_LOCK_EXCLUSIVE
/*
 * BTStackData -- As we descend a tree, we push the location of pivot
 * tuples whose downlink we are about to follow onto a private stack.  If
 * we split a leaf, we use this stack to walk back up the tree and insert
 * data into its parent page at the correct location.  We also have to
 * recursively insert into the grandparent page if and when the parent page
 * splits.  Our private stack can become stale due to concurrent page
 * splits and page deletions, but it should never give us an irredeemably
 * bad picture.
 */
typedef struct BTStackData
{
	BlockNumber bts_blkno;
	OffsetNumber bts_offset;
	struct BTStackData *bts_parent;
} BTStackData;

typedef BTStackData *BTStack;
/*
 * BTScanInsertData is the btree-private state needed to find an initial
 * position for an indexscan, or to insert new tuples -- an "insertion
 * scankey" (not to be confused with a search scankey).  It's used to descend
 * a B-Tree using _bt_search.
 *
 * heapkeyspace indicates if we expect all keys in the index to be physically
 * unique because heap TID is used as a tiebreaker attribute, and if index may
 * have truncated key attributes in pivot tuples.  This is actually a property
 * of the index relation itself (not an indexscan).  heapkeyspace indexes are
 * indexes whose version is >= version 4.  It's convenient to keep this close
 * by, rather than accessing the metapage repeatedly.
 *
 * allequalimage is set to indicate that deduplication is safe for the index.
 * This is also a property of the index relation rather than an indexscan.
 *
 * anynullkeys indicates if any of the keys had NULL value when scankey was
 * built from index tuple (note that already-truncated tuple key attributes
 * set NULL as a placeholder key value, which also affects value of
 * anynullkeys).  This is a convenience for unique index non-pivot tuple
 * insertion, which usually temporarily unsets scantid, but shouldn't iff
 * anynullkeys is true.  Value generally matches non-pivot tuple's HasNulls
 * bit, but may not when inserting into an INCLUDE index (tuple header value
 * is affected by the NULL-ness of both key and non-key attributes).
 *
 * When nextkey is false (the usual case), _bt_search and _bt_binsrch will
 * locate the first item >= scankey.  When nextkey is true, they will locate
 * the first item > scan key.
 *
 * pivotsearch is set to true by callers that want to re-find a leaf page
 * using a scankey built from a leaf page's high key.  Most callers set this
 * to false.
 *
 * scantid is the heap TID that is used as a final tiebreaker attribute.  It
 * is set to NULL when index scan doesn't need to find a position for a
 * specific physical tuple.  Must be set when inserting new tuples into
 * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
 * in one exact position (it's never set with !heapkeyspace indexes, though).
 * Despite the representational difference, nbtree search code considers
 * scantid to be just another insertion scankey attribute.
 *
 * scankeys is an array of scan key entries for attributes that are compared
 * before scantid (user-visible attributes).  keysz is the size of the array.
 * During insertion, there must be a scan key for every attribute, but when
 * starting a regular index scan some can be omitted.  The array is used as a
 * flexible array member, though it's sized in a way that makes it possible to
 * use stack allocations.  See nbtree/README for full details.
 */
typedef struct BTScanInsertData
{
	bool		heapkeyspace;
	bool		allequalimage;
	bool		anynullkeys;
	bool		nextkey;
	bool		pivotsearch;
	ItemPointer scantid;		/* tiebreaker for scankeys */
	int			keysz;			/* Size of scankeys array */
	ScanKeyData scankeys[INDEX_MAX_KEYS];	/* Must appear last */
} BTScanInsertData;

typedef BTScanInsertData *BTScanInsert;
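
/*
 * Illustrative sketch (hypothetical "rel" and "itup"; not a verbatim
 * excerpt): insertion-style code builds an insertion scankey from an index
 * tuple and descends with it (see _bt_mkscankey and _bt_search, declared
 * below; insertions pass no snapshot, while scans would pass theirs).
 *
 *		BTScanInsert itup_key = _bt_mkscankey(rel, itup);
 *		Buffer		buf;
 *		BTStack		stack = _bt_search(rel, itup_key, &buf, BT_WRITE, NULL);
 */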
/*
 * BTInsertStateData is a working area used during insertion.
 *
 * This is filled in after descending the tree to the first leaf page the new
 * tuple might belong on.  Tracks the current position while performing
 * uniqueness check, before we have determined which exact page to insert
 * to.
 *
 * (This should be private to nbtinsert.c, but it's also used by
 * _bt_binsrch_insert)
 */
typedef struct BTInsertStateData
{
	IndexTuple	itup;			/* Item we're inserting */
	Size		itemsz;			/* Size of itup -- should be MAXALIGN()'d */
	BTScanInsert itup_key;		/* Insertion scankey */

	/* Buffer containing leaf page we're likely to insert itup on */
	Buffer		buf;

	/*
	 * Cache of bounds within the current buffer.  Only used for insertions
	 * where _bt_check_unique is called.  See _bt_binsrch_insert and
	 * _bt_findinsertloc for details.
	 */
	bool		bounds_valid;
	OffsetNumber low;
	OffsetNumber stricthigh;

	/*
	 * if _bt_binsrch_insert found the location inside existing posting list,
	 * save the position inside the list.  -1 sentinel value indicates
	 * overlap with an existing posting list tuple that has its LP_DEAD bit
	 * set.
	 */
	int			postingoff;
} BTInsertStateData;

typedef BTInsertStateData *BTInsertState;
/*
 * State used to represent an individual pending tuple during
 * deduplication.
 */
typedef struct BTDedupInterval
{
	OffsetNumber baseoff;		/* page offset of base tuple */
	uint16		nitems;			/* existing items merged, including base */
} BTDedupInterval;
/*
 * BTDedupStateData is a working area used during deduplication.
 *
 * The status info fields track the state of a whole-page deduplication pass.
 * State about the current pending posting list is also tracked.
 *
 * A pending posting list is comprised of a contiguous group of equal items
 * from the page, starting from page offset number 'baseoff'.  This is the
 * offset number of the "base" tuple for new posting list.  'nitems' is the
 * current total number of existing items from the page that will be merged to
 * make a new posting list tuple, including the base tuple item.  (Existing
 * items may themselves be posting list tuples, or regular non-pivot tuples.)
 *
 * The total size of the existing tuples to be freed when pending posting list
 * is processed gets tracked by 'phystupsize'.  This information allows
 * deduplication to calculate the space saving for each new posting list
 * tuple, and for the entire pass over the page as a whole.
 */
typedef struct BTDedupStateData
{
	/* Deduplication status info for entire pass over page */
	bool		deduplicate;	/* Still deduplicating page? */
	int			nmaxitems;		/* Number of max-sized tuples so far */
	Size		maxpostingsize; /* Limit on size of final tuple */

	/* Metadata about base tuple of current pending posting list */
	IndexTuple	base;			/* Use to form new posting list */
	OffsetNumber baseoff;		/* page offset of base */
	Size		basetupsize;	/* base size without original posting list */

	/* Other metadata about pending posting list */
	ItemPointer htids;			/* Heap TIDs in pending posting list */
	int			nhtids;			/* Number of heap TIDs in htids array */
	int			nitems;			/* Number of existing tuples/line pointers */
	Size		phystupsize;	/* Includes line pointer overhead */

	/*
	 * Array of tuples to go on new version of the page.  Contains one entry
	 * for each group of consecutive items.  Note that existing tuples that
	 * will not become posting list tuples do not appear in the array (they
	 * are implicitly unchanged by deduplication pass).
	 */
	int			nintervals;		/* current number of intervals in array */
	BTDedupInterval intervals[MaxIndexTuplesPerPage];
} BTDedupStateData;

typedef BTDedupStateData *BTDedupState;
/*
 * BTVacuumPostingData is state that represents how to VACUUM a posting list
 * tuple when some (though not all) of its TIDs are to be deleted.
 *
 * Convention is that itup field is the original posting list tuple on input,
 * and palloc()'d final tuple used to overwrite existing tuple on output.
 */
typedef struct BTVacuumPostingData
{
	/* Tuple that will be/was updated */
	IndexTuple	itup;
	OffsetNumber updatedoffset;

	/* State needed to describe final itup in WAL */
	uint16		ndeletedtids;
	uint16		deletetids[FLEXIBLE_ARRAY_MEMBER];
} BTVacuumPostingData;

typedef BTVacuumPostingData *BTVacuumPosting;
/*
 * BTScanOpaqueData is the btree-private state needed for an indexscan.
 * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
 * details of the preprocessing), information about the current location
 * of the scan, and information about the marked location, if any.  (We use
 * BTScanPosData to represent the data needed for each of current and marked
 * locations.)	In addition we can remember some known-killed index entries
 * that must be marked before we can move off the current page.
 *
 * Index scans work a page at a time: we pin and read-lock the page, identify
 * all the matching items on the page and save them in BTScanPosData, then
 * release the read-lock while returning the items to the caller for
 * processing.  This approach minimizes lock/unlock traffic.  Note that we
 * keep the pin on the index page until the caller is done with all the items
 * (this is needed for VACUUM synchronization, see nbtree/README).  When we
 * are ready to step to the next page, if the caller has told us any of the
 * items were killed, we re-lock the page to mark them killed, then unlock.
 * Finally we drop the pin and step to the next page in the appropriate
 * direction.
 *
 * If we are doing an index-only scan, we save the entire IndexTuple for each
 * matched item, otherwise only its heap TID and offset.  The IndexTuples go
 * into a separate workspace array; each BTScanPosItem stores its tuple's
 * offset within that array.  Posting list tuples store a "base" tuple once,
 * allowing the same key to be returned for each TID in the posting list
 * tuple.
 */

typedef struct BTScanPosItem	/* what we remember about each match */
{
	ItemPointerData heapTid;	/* TID of referenced heap item */
	OffsetNumber indexOffset;	/* index item's location within page */
	LocationIndex tupleOffset;	/* IndexTuple's offset in workspace, if any */
} BTScanPosItem;
typedef struct BTScanPosData
{
	Buffer		buf;			/* if valid, the buffer is pinned */

	XLogRecPtr	lsn;			/* pos in the WAL stream when page was read */
	BlockNumber currPage;		/* page referenced by items array */
	BlockNumber nextPage;		/* page's right link when we scanned it */

	/*
	 * moreLeft and moreRight track whether we think there may be matching
	 * index entries to the left and right of the current page, respectively.
	 * We can clear the appropriate one of these flags when _bt_checkkeys()
	 * returns continuescan = false.
	 */
	bool		moreLeft;
	bool		moreRight;

	/*
	 * If we are doing an index-only scan, nextTupleOffset is the first free
	 * location in the associated tuple storage workspace.
	 */
	int			nextTupleOffset;

	/*
	 * The items array is always ordered in index order (ie, increasing
	 * indexoffset).  When scanning backwards it is convenient to fill the
	 * array back-to-front, so we start at the last slot and fill downwards.
	 * Hence we need both a first-valid-entry and a last-valid-entry counter.
	 * itemIndex is a cursor showing which entry was last returned to caller.
	 */
	int			firstItem;		/* first valid index in items[] */
	int			lastItem;		/* last valid index in items[] */
	int			itemIndex;		/* current index in items[] */

	BTScanPosItem items[MaxTIDsPerBTreePage];	/* MUST BE LAST */
} BTScanPosData;

typedef BTScanPosData *BTScanPos;
#define BTScanPosIsPinned(scanpos) \
( \
	AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
				!BufferIsValid((scanpos).buf)), \
	BufferIsValid((scanpos).buf) \
)
#define BTScanPosUnpin(scanpos) \
	do { \
		ReleaseBuffer((scanpos).buf); \
		(scanpos).buf = InvalidBuffer; \
	} while (0)
#define BTScanPosUnpinIfPinned(scanpos) \
	do { \
		if (BTScanPosIsPinned(scanpos)) \
			BTScanPosUnpin(scanpos); \
	} while (0)
#define BTScanPosIsValid(scanpos) \
( \
	AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
				!BufferIsValid((scanpos).buf)), \
	BlockNumberIsValid((scanpos).currPage) \
)
#define BTScanPosInvalidate(scanpos) \
	do { \
		(scanpos).currPage = InvalidBlockNumber; \
		(scanpos).nextPage = InvalidBlockNumber; \
		(scanpos).buf = InvalidBuffer; \
		(scanpos).lsn = InvalidXLogRecPtr; \
		(scanpos).nextTupleOffset = 0; \
	} while (0)
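
/*
 * Illustrative sketch (hypothetical BTScanOpaque "so"; not a verbatim
 * excerpt): finishing with the current position drops the pin if one is
 * still held, then marks the position invalid.
 *
 *		BTScanPosUnpinIfPinned(so->currPos);
 *		BTScanPosInvalidate(so->currPos);
 */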
/* We need one of these for each equality-type SK_SEARCHARRAY scan key */
typedef struct BTArrayKeyInfo
{
	int			scan_key;		/* index of associated key in arrayKeyData */
	int			cur_elem;		/* index of current element in elem_values */
	int			mark_elem;		/* index of marked element in elem_values */
	int			num_elems;		/* number of elems in current array value */
	Datum	   *elem_values;	/* array of num_elems Datums */
} BTArrayKeyInfo;
typedef struct BTScanOpaqueData
{
	/* these fields are set by _bt_preprocess_keys(): */
	bool		qual_ok;		/* false if qual can never be satisfied */
	int			numberOfKeys;	/* number of preprocessed scan keys */
	ScanKey		keyData;		/* array of preprocessed scan keys */

	/* workspace for SK_SEARCHARRAY support */
	ScanKey		arrayKeyData;	/* modified copy of scan->keyData */
	int			numArrayKeys;	/* number of equality-type array keys (-1 if
								 * there are any unsatisfiable array keys) */
	int			arrayKeyCount;	/* count indicating number of array scan keys
								 * processed */
	BTArrayKeyInfo *arrayKeys;	/* info about each equality-type array key */
	MemoryContext arrayContext; /* scan-lifespan context for array data */

	/* info about killed items if any (killedItems is NULL if never used) */
	int		   *killedItems;	/* currPos.items indexes of killed items */
	int			numKilled;		/* number of currently stored items */

	/*
	 * If we are doing an index-only scan, these are the tuple storage
	 * workspaces for the currPos and markPos respectively.  Each is of size
	 * BLCKSZ, so it can hold as much as a full page's worth of tuples.
	 */
	char	   *currTuples;		/* tuple storage for currPos */
	char	   *markTuples;		/* tuple storage for markPos */

	/*
	 * If the marked position is on the same page as current position, we
	 * don't use markPos, but just keep the marked itemIndex in markItemIndex
	 * (all the rest of currPos is valid for the mark position).  Hence, to
	 * determine if there is a mark, first look at markItemIndex, then at
	 * markPos.
	 */
	int			markItemIndex;	/* itemIndex, or -1 if not valid */

	/* keep these last in struct for efficiency */
	BTScanPosData currPos;		/* current position data */
	BTScanPosData markPos;		/* marked position, if any */
} BTScanOpaqueData;

typedef BTScanOpaqueData *BTScanOpaque;
/*
 * We use some private sk_flags bits in preprocessed scan keys.  We're allowed
 * to use bits 16-31 (see skey.h).  The uppermost bits are copied from the
 * index's indoption[] array entry for the index attribute.
 */
#define SK_BT_REQFWD	0x00010000	/* required to continue forward scan */
#define SK_BT_REQBKWD	0x00020000	/* required to continue backward scan */
#define SK_BT_INDOPTION_SHIFT  24	/* must clear the above bits */
#define SK_BT_DESC			(INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
#define SK_BT_NULLS_FIRST	(INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)
typedef struct BTOptions
{
	int32		varlena_header_;	/* varlena header (do not touch directly!) */
	int			fillfactor;		/* page fill factor in percent (0..100) */
	/* fraction of newly inserted tuples needed to trigger index cleanup */
	float8		vacuum_cleanup_index_scale_factor;
	bool		deduplicate_items;	/* Try to deduplicate items? */
} BTOptions;
#define BTGetFillFactor(relation) \
	(AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
				 relation->rd_rel->relam == BTREE_AM_OID), \
	 (relation)->rd_options ? \
	 ((BTOptions *) (relation)->rd_options)->fillfactor : \
	 BTREE_DEFAULT_FILLFACTOR)
#define BTGetTargetPageFreeSpace(relation) \
	(BLCKSZ * (100 - BTGetFillFactor(relation)) / 100)
#define BTGetDeduplicateItems(relation) \
	(AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
				 relation->rd_rel->relam == BTREE_AM_OID), \
	 ((relation)->rd_options ? \
	  ((BTOptions *) (relation)->rd_options)->deduplicate_items : true))
/*
 * Constant definition for progress reporting.  Phase numbers must match
 * btbuildphasename.
 */
/* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
#define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN		2
#define PROGRESS_BTREE_PHASE_PERFORMSORT_1				3
#define PROGRESS_BTREE_PHASE_PERFORMSORT_2				4
#define PROGRESS_BTREE_PHASE_LEAF_LOAD					5
/*
 * external entry points for btree, in nbtree.c
 */
extern void btbuildempty(Relation index);
extern bool btinsert(Relation rel, Datum *values, bool *isnull,
					 ItemPointer ht_ctid, Relation heapRel,
					 IndexUniqueCheck checkUnique,
					 struct IndexInfo *indexInfo);
extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
extern Size btestimateparallelscan(void);
extern void btinitparallelscan(void *target);
extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
					 ScanKey orderbys, int norderbys);
extern void btparallelrescan(IndexScanDesc scan);
extern void btendscan(IndexScanDesc scan);
extern void btmarkpos(IndexScanDesc scan);
extern void btrestrpos(IndexScanDesc scan);
extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
										   IndexBulkDeleteResult *stats,
										   IndexBulkDeleteCallback callback,
										   void *callback_state);
extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
											  IndexBulkDeleteResult *stats);
extern bool btcanreturn(Relation index, int attno);
/*
 * prototypes for internal functions in nbtree.c
 */
extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno);
extern void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page);
extern void _bt_parallel_done(IndexScanDesc scan);
extern void _bt_parallel_advance_array_keys(IndexScanDesc scan);
/*
 * prototypes for functions in nbtdedup.c
 */
extern void _bt_dedup_pass(Relation rel, Buffer buf, Relation heapRel,
						   IndexTuple newitem, Size newitemsz,
						   bool checkingunique);
extern void _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
									OffsetNumber baseoff);
extern bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup);
extern Size _bt_dedup_finish_pending(Page newpage, BTDedupState state);
extern IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids,
								   int nhtids);
extern void _bt_update_posting(BTVacuumPosting vacposting);
extern IndexTuple _bt_swap_posting(IndexTuple newitem, IndexTuple oposting,
								   int postingoff);
/*
 * prototypes for functions in nbtinsert.c
 */
extern bool _bt_doinsert(Relation rel, IndexTuple itup,
						 IndexUniqueCheck checkUnique, Relation heapRel);
extern void _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack);
extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child);
/*
 * prototypes for functions in nbtsplitloc.c
 */
extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
									 OffsetNumber newitemoff, Size newitemsz,
									 IndexTuple newitem, bool *newitemonleft);
/*
 * prototypes for functions in nbtpage.c
 */
extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
							 bool allequalimage);
extern void _bt_update_meta_cleanup_info(Relation rel,
										 TransactionId oldestBtpoXact,
										 float8 numHeapTuples);
extern void _bt_upgrademetapage(Page page);
extern Buffer _bt_getroot(Relation rel, int access);
extern Buffer _bt_gettrueroot(Relation rel);
extern int	_bt_getrootheight(Relation rel);
extern void _bt_metaversion(Relation rel, bool *heapkeyspace,
							bool *allequalimage);
extern void _bt_checkpage(Relation rel, Buffer buf);
extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
							   BlockNumber blkno, int access);
extern void _bt_relbuf(Relation rel, Buffer buf);
extern void _bt_lockbuf(Relation rel, Buffer buf, int access);
extern void _bt_unlockbuf(Relation rel, Buffer buf);
extern bool _bt_conditionallockbuf(Relation rel, Buffer buf);
extern void _bt_upgradelockbufcleanup(Relation rel, Buffer buf);
extern void _bt_pageinit(Page page, Size size);
extern bool _bt_page_recyclable(Page page);
extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
								OffsetNumber *deletable, int ndeletable,
								BTVacuumPosting *updatable, int nupdatable);
extern void _bt_delitems_delete(Relation rel, Buffer buf,
								OffsetNumber *deletable, int ndeletable,
								Relation heapRel);
extern uint32 _bt_pagedel(Relation rel, Buffer leafbuf,
						  TransactionId *oldestBtpoXact);
/*
 * prototypes for functions in nbtsearch.c
 */
extern BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP,
						  int access, Snapshot snapshot);
extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf,
							bool forupdate, BTStack stack, int access,
							Snapshot snapshot);
extern OffsetNumber _bt_binsrch_insert(Relation rel,
									   BTInsertState insertstate);
extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page,
						 OffsetNumber offnum);
extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
							   Snapshot snapshot);
/*
 * prototypes for functions in nbtutils.c
 */
extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
extern void _bt_freestack(BTStack stack);
extern void _bt_preprocess_array_keys(IndexScanDesc scan);
extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
extern bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir);
extern void _bt_mark_array_keys(IndexScanDesc scan);
extern void _bt_restore_array_keys(IndexScanDesc scan);
extern void _bt_preprocess_keys(IndexScanDesc scan);
extern bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
						  int tupnatts, ScanDirection dir,
						  bool *continuescan);
extern void _bt_killitems(IndexScanDesc scan);
extern BTCycleId _bt_vacuum_cycleid(Relation rel);
extern BTCycleId _bt_start_vacuum(Relation rel);
extern void _bt_end_vacuum(Relation rel);
extern void _bt_end_vacuum_callback(int code, Datum arg);
extern Size BTreeShmemSize(void);
extern void BTreeShmemInit(void);
extern bytea *btoptions(Datum reloptions, bool validate);
extern bool btproperty(Oid index_oid, int attno,
					   IndexAMProperty prop, const char *propname,
					   bool *res, bool *isnull);
extern char *btbuildphasename(int64 phasenum);
extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
							   IndexTuple firstright, BTScanInsert itup_key);
extern int	_bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
								IndexTuple firstright);
extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
							OffsetNumber offnum);
extern void _bt_check_third_page(Relation rel, Relation heap,
								 bool needheaptidspace, Page page,
								 IndexTuple newtup);
extern bool _bt_allequalimage(Relation rel, bool debugmessage);
/*
 * prototypes for functions in nbtvalidate.c
 */
extern bool btvalidate(Oid opclassoid);
extern void btadjustmembers(Oid opfamilyoid,
							Oid opclassoid,
							List *operators,
							List *functions);
/*
 * prototypes for functions in nbtsort.c
 */
extern IndexBuildResult *btbuild(Relation heap, Relation index,
								 struct IndexInfo *indexInfo);
extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);

#endif							/* NBTREE_H */