/*-------------------------------------------------------------------------
 *
 * htup_details.h
 *	  POSTGRES heap tuple header definitions.
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/htup_details.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HTUP_DETAILS_H
#define HTUP_DETAILS_H
#include "access/htup.h"
#include "access/transam.h"
#include "access/tupdesc.h"
#include "access/tupmacs.h"
#include "storage/bufpage.h"
/*
 * MaxTupleAttributeNumber limits the number of (user) columns in a tuple.
 * The key limit on this value is that the size of the fixed overhead for
 * a tuple, plus the size of the null-values bitmap (at 1 bit per column),
 * plus MAXALIGN alignment, must fit into t_hoff which is uint8.  On most
 * machines the upper limit without making t_hoff wider would be a little
 * over 1700.  We use round numbers here and for MaxHeapAttributeNumber
 * so that alterations in HeapTupleHeaderData layout won't change the
 * supported max number of columns.
 */
#define MaxTupleAttributeNumber 1664	/* 8 * 208 */
/*
 * MaxHeapAttributeNumber limits the number of (user) columns in a table.
 * This should be somewhat less than MaxTupleAttributeNumber.  It must be
 * at least one less, else we will fail to do UPDATEs on a maximal-width
 * table (because UPDATE has to form working tuples that include CTID).
 * In practice we want some additional daylight so that we can gracefully
 * support operations that add hidden "resjunk" columns, for example
 * SELECT * FROM wide_table ORDER BY foo, bar, baz.
 * In any case, depending on column data types you will likely be running
 * into the disk-block-based limit on overall tuple size if you have more
 * than a thousand or so columns.  TOAST won't help.
 */
#define MaxHeapAttributeNumber	1600	/* 8 * 200 */
/*
 * Heap tuple header.  To avoid wasting space, the fields should be
 * laid out in such a way as to avoid structure padding.
 *
 * Datums of composite types (row types) share the same general structure
 * as on-disk tuples, so that the same routines can be used to build and
 * examine them.  However the requirements are slightly different: a Datum
 * does not need any transaction visibility information, and it does need
 * a length word and some embedded type information.  We can achieve this
 * by overlaying the xmin/cmin/xmax/cmax/xvac fields of a heap tuple
 * with the fields needed in the Datum case.  Typically, all tuples built
 * in-memory will be initialized with the Datum fields; but when a tuple is
 * about to be inserted in a table, the transaction fields will be filled,
 * overwriting the datum fields.
 *
 * The overall structure of a heap tuple looks like:
 *			fixed fields (HeapTupleHeaderData struct)
 *			nulls bitmap (if HEAP_HASNULL is set in t_infomask)
 *			alignment padding (as needed to make user data MAXALIGN'd)
 *			object ID (if HEAP_HASOID_OLD is set in t_infomask, not created
 *				anymore)
 *			user data fields
 *
 * We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
 * physical fields.  Xmin and Xmax are always really stored, but Cmin, Cmax
 * and Xvac share a field.  This works because we know that Cmin and Cmax
 * are only interesting for the lifetime of the inserting and deleting
 * transaction respectively.  If a tuple is inserted and deleted in the same
 * transaction, we store a "combo" command id that can be mapped to the real
 * cmin and cmax, but only by use of local state within the originating
 * backend.  See combocid.c for more details.  Meanwhile, Xvac is only set by
 * old-style VACUUM FULL, which does not have any command sub-structure and so
 * does not need either Cmin or Cmax.  (This requires that old-style VACUUM
 * FULL never try to move a tuple whose Cmin or Cmax is still interesting,
 * ie, an insert-in-progress or delete-in-progress tuple.)
 *
 * A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
 * is initialized with its own TID (location).  If the tuple is ever updated,
 * its t_ctid is changed to point to the replacement version of the tuple.  Or
 * if the tuple is moved from one partition to another, due to an update of
 * the partition key, t_ctid is set to a special value to indicate that
 * (see ItemPointerSetMovedPartitions).  Thus, a tuple is the latest version
 * of its row iff XMAX is invalid or
 * t_ctid points to itself (in which case, if XMAX is valid, the tuple is
 * either locked or deleted).  One can follow the chain of t_ctid links
 * to find the newest version of the row, unless it was moved to a different
 * partition.  Beware however that VACUUM might
 * erase the pointed-to (newer) tuple before erasing the pointing (older)
 * tuple.  Hence, when following a t_ctid link, it is necessary to check
 * to see if the referenced slot is empty or contains an unrelated tuple.
 * Check that the referenced tuple has XMIN equal to the referencing tuple's
 * XMAX to verify that it is actually the descendant version and not an
 * unrelated tuple stored into a slot recently freed by VACUUM.  If either
 * check fails, one may assume that there is no live descendant version.
 *
 * t_ctid is sometimes used to store a speculative insertion token, instead
 * of a real TID.  A speculative token is set on a tuple that's being
 * inserted, until the inserter is sure that it wants to go ahead with the
 * insertion.  Hence a token should only be seen on a tuple with an XMAX
 * that's still in-progress, or invalid/aborted.  The token is replaced with
 * the tuple's real TID when the insertion is confirmed.  One should never
 * see a speculative insertion token while following a chain of t_ctid links,
 * because they are not used on updates, only insertions.
 *
 * Following the fixed header fields, the nulls bitmap is stored (beginning
 * at t_bits).  The bitmap is *not* stored if t_infomask shows that there
 * are no nulls in the tuple.  If an OID field is present (as indicated by
 * t_infomask), then it is stored just before the user data, which begins at
 * the offset shown by t_hoff.  Note that t_hoff must be a multiple of
 * MAXALIGN.
 */
typedef struct HeapTupleFields
{
	TransactionId t_xmin;		/* inserting xact ID */
	TransactionId t_xmax;		/* deleting or locking xact ID */

	union
	{
		CommandId	t_cid;		/* inserting or deleting command ID, or both */
		TransactionId t_xvac;	/* old-style VACUUM FULL xact ID */
	}			t_field3;
} HeapTupleFields;
typedef struct DatumTupleFields
{
	int32		datum_len_;		/* varlena header (do not touch directly!) */

	int32		datum_typmod;	/* -1, or identifier of a record type */

	Oid			datum_typeid;	/* composite type OID, or RECORDOID */

	/*
	 * datum_typeid cannot be a domain over composite, only plain composite,
	 * even if the datum is meant as a value of a domain-over-composite type.
	 * This is in line with the general principle that CoerceToDomain does not
	 * change the physical representation of the base type value.
	 *
	 * Note: field ordering is chosen with thought that Oid might someday
	 * widen to 64 bits.
	 */
} DatumTupleFields;
struct HeapTupleHeaderData
{
	union
	{
		HeapTupleFields t_heap;
		DatumTupleFields t_datum;
	}			t_choice;

	ItemPointerData t_ctid;		/* current TID of this or newer tuple (or a
								 * speculative insertion token) */

	/* Fields below here must match MinimalTupleData! */

#define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK2 2
	uint16		t_infomask2;	/* number of attributes + various flags */

#define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK 3
	uint16		t_infomask;		/* various flag bits, see below */

#define FIELDNO_HEAPTUPLEHEADERDATA_HOFF 4
	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */

	/* ^ - 23 bytes - ^ */

#define FIELDNO_HEAPTUPLEHEADERDATA_BITS 5
	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */

	/* MORE DATA FOLLOWS AT END OF STRUCT */
};

/* typedef appears in htup.h */

#define SizeofHeapTupleHeader offsetof(HeapTupleHeaderData, t_bits)
/*
 * information stored in t_infomask:
 */
#define HEAP_HASNULL			0x0001	/* has null attribute(s) */
#define HEAP_HASVARWIDTH		0x0002	/* has variable-width attribute(s) */
#define HEAP_HASEXTERNAL		0x0004	/* has external stored attribute(s) */
#define HEAP_HASOID_OLD			0x0008	/* has an object-id field */
#define HEAP_XMAX_KEYSHR_LOCK	0x0010	/* xmax is a key-shared locker */
#define HEAP_COMBOCID			0x0020	/* t_cid is a combo CID */
#define HEAP_XMAX_EXCL_LOCK		0x0040	/* xmax is exclusive locker */
#define HEAP_XMAX_LOCK_ONLY		0x0080	/* xmax, if valid, is only a locker */

 /* xmax is a shared locker */
#define HEAP_XMAX_SHR_LOCK	(HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)

#define HEAP_LOCK_MASK	(HEAP_XMAX_SHR_LOCK | HEAP_XMAX_EXCL_LOCK | \
						 HEAP_XMAX_KEYSHR_LOCK)
#define HEAP_XMIN_COMMITTED		0x0100	/* t_xmin committed */
#define HEAP_XMIN_INVALID		0x0200	/* t_xmin invalid/aborted */
#define HEAP_XMIN_FROZEN		(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID)
#define HEAP_XMAX_COMMITTED		0x0400	/* t_xmax committed */
#define HEAP_XMAX_INVALID		0x0800	/* t_xmax invalid/aborted */
#define HEAP_XMAX_IS_MULTI		0x1000	/* t_xmax is a MultiXactId */
#define HEAP_UPDATED			0x2000	/* this is UPDATEd version of row */
#define HEAP_MOVED_OFF			0x4000	/* moved to another place by pre-9.0
										 * VACUUM FULL; kept for binary
										 * upgrade support */
#define HEAP_MOVED_IN			0x8000	/* moved from another place by pre-9.0
										 * VACUUM FULL; kept for binary
										 * upgrade support */
#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)

#define HEAP_XACT_MASK			0xFFF0	/* visibility-related bits */
/*
 * A tuple is only locked (i.e. not updated by its Xmax) if the
 * HEAP_XMAX_LOCK_ONLY bit is set; or, for pg_upgrade's sake, if the Xmax is
 * not a multi and the EXCL_LOCK bit is set.
 *
 * See also HeapTupleHeaderIsOnlyLocked, which also checks for a possible
 * aborted updater transaction.
 *
 * Beware of multiple evaluations of the argument.
 */
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask) \
	(((infomask) & HEAP_XMAX_LOCK_ONLY) || \
	 (((infomask) & (HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK)) == HEAP_XMAX_EXCL_LOCK))
/*
 * A tuple that has HEAP_XMAX_IS_MULTI and HEAP_XMAX_LOCK_ONLY but neither of
 * HEAP_XMAX_EXCL_LOCK and HEAP_XMAX_KEYSHR_LOCK must come from a tuple that was
 * share-locked in 9.2 or earlier and then pg_upgrade'd.
 *
 * In 9.2 and prior, HEAP_XMAX_IS_MULTI was only set when there were multiple
 * FOR SHARE lockers of that tuple.  That set HEAP_XMAX_LOCK_ONLY (with a
 * different name back then) but neither of HEAP_XMAX_EXCL_LOCK and
 * HEAP_XMAX_KEYSHR_LOCK.  That combination is no longer possible in 9.3 and
 * up, so if we see that combination we know for certain that the tuple was
 * locked in an earlier release; since all such lockers are gone (they cannot
 * survive through pg_upgrade), such tuples can safely be considered not
 * locked.
 *
 * We must not resolve such multixacts locally, because the result would be
 * bogus, regardless of where they stand with respect to the current valid
 * multixact range.
 */
#define HEAP_LOCKED_UPGRADED(infomask) \
( \
	 ((infomask) & HEAP_XMAX_IS_MULTI) != 0 && \
	 ((infomask) & HEAP_XMAX_LOCK_ONLY) != 0 && \
	 (((infomask) & (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)) == 0) \
)
/*
 * Use these to test whether a particular lock is applied to a tuple
 */
#define HEAP_XMAX_IS_SHR_LOCKED(infomask) \
	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK)
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask) \
	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK)
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) \
	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK)

/* turn these all off when Xmax is to change */
#define HEAP_XMAX_BITS (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID | \
						HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK | HEAP_XMAX_LOCK_ONLY)
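
/*
 * Illustrative sketch (not part of the upstream header): the t_ctid rule in
 * the overview comment above says a tuple is the latest version of its row
 * iff its XMAX is invalid or its t_ctid points at the tuple itself.  A
 * minimal rendering of that test, assuming the caller passes the tuple's own
 * TID as "self"; the function name is hypothetical.
 */
static inline bool
heap_tuple_is_latest_version_sketch(HeapTupleHeader tup, ItemPointer self)
{
	/* latest version: no updater recorded, or the update chain ends here */
	return ((tup->t_infomask & HEAP_XMAX_INVALID) != 0 ||
			ItemPointerEquals(&tup->t_ctid, self));
}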
/*
 * information stored in t_infomask2:
 */
#define HEAP_NATTS_MASK			0x07FF	/* 11 bits for number of attributes */
/* bits 0x1800 are available */
#define HEAP_KEYS_UPDATED		0x2000	/* tuple was updated and key cols
										 * modified, or tuple deleted */
#define HEAP_HOT_UPDATED		0x4000	/* tuple was HOT-updated */
#define HEAP_ONLY_TUPLE			0x8000	/* this is heap-only tuple */

#define HEAP2_XACT_MASK			0xE000	/* visibility-related bits */
/*
 * HEAP_TUPLE_HAS_MATCH is a temporary flag used during hash joins.  It is
 * only used in tuples that are in the hash table, and those don't need
 * any visibility information, so we can overlay it on a visibility flag
 * instead of using up a dedicated bit.
 */
#define HEAP_TUPLE_HAS_MATCH	HEAP_ONLY_TUPLE /* tuple has a join match */
/*
 * HeapTupleHeader accessor macros
 *
 * Note: beware of multiple evaluations of "tup" argument.  But the Set
 * macros evaluate their other argument only once.
 */

/*
 * HeapTupleHeaderGetRawXmin returns the "raw" xmin field, which is the xid
 * originally used to insert the tuple.  However, the tuple might actually
 * be frozen (via HeapTupleHeaderSetXminFrozen) in which case the tuple's xmin
 * is visible to every snapshot.  Prior to PostgreSQL 9.4, we actually changed
 * the xmin to FrozenTransactionId, and that value may still be encountered
 * on disk.
 */
#define HeapTupleHeaderGetRawXmin(tup) \
( \
	(tup)->t_choice.t_heap.t_xmin \
)

#define HeapTupleHeaderGetXmin(tup) \
( \
	HeapTupleHeaderXminFrozen(tup) ? \
		FrozenTransactionId : HeapTupleHeaderGetRawXmin(tup) \
)

#define HeapTupleHeaderSetXmin(tup, xid) \
( \
	(tup)->t_choice.t_heap.t_xmin = (xid) \
)

#define HeapTupleHeaderXminCommitted(tup) \
( \
	((tup)->t_infomask & HEAP_XMIN_COMMITTED) != 0 \
)

#define HeapTupleHeaderXminInvalid(tup) \
( \
	((tup)->t_infomask & (HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID)) == \
		HEAP_XMIN_INVALID \
)

#define HeapTupleHeaderXminFrozen(tup) \
( \
	((tup)->t_infomask & (HEAP_XMIN_FROZEN)) == HEAP_XMIN_FROZEN \
)

#define HeapTupleHeaderSetXminCommitted(tup) \
( \
	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
	((tup)->t_infomask |= HEAP_XMIN_COMMITTED) \
)

#define HeapTupleHeaderSetXminInvalid(tup) \
( \
	AssertMacro(!HeapTupleHeaderXminCommitted(tup)), \
	((tup)->t_infomask |= HEAP_XMIN_INVALID) \
)

#define HeapTupleHeaderSetXminFrozen(tup) \
( \
	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
	((tup)->t_infomask |= HEAP_XMIN_FROZEN) \
)

/*
 * HeapTupleHeaderGetRawXmax gets you the raw Xmax field.  To find out the Xid
 * that updated a tuple, you might need to resolve the MultiXactId if certain
 * bits are set.  HeapTupleHeaderGetUpdateXid checks those bits and takes care
 * to resolve the MultiXactId if necessary.  This might involve multixact I/O,
 * so it should only be used if absolutely necessary.
 */
#define HeapTupleHeaderGetUpdateXid(tup) \
( \
	(!((tup)->t_infomask & HEAP_XMAX_INVALID) && \
	 ((tup)->t_infomask & HEAP_XMAX_IS_MULTI) && \
	 !((tup)->t_infomask & HEAP_XMAX_LOCK_ONLY)) ? \
		HeapTupleGetUpdateXid(tup) \
	: \
		HeapTupleHeaderGetRawXmax(tup) \
)

#define HeapTupleHeaderGetRawXmax(tup) \
( \
	(tup)->t_choice.t_heap.t_xmax \
)

#define HeapTupleHeaderSetXmax(tup, xid) \
( \
	(tup)->t_choice.t_heap.t_xmax = (xid) \
)
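
/*
 * Illustrative sketch (not part of the upstream header): when following a
 * t_ctid link, the overview comment above says to verify that the tuple
 * found at the new TID has an XMIN equal to the old tuple's XMAX before
 * treating it as the descendant version.  A simplified check might look
 * like this; the function name is hypothetical, and the multixact case is
 * ignored for brevity (real code would use HeapTupleHeaderGetUpdateXid,
 * which needs backend infrastructure declared elsewhere).
 */
static inline bool
heap_tuple_is_descendant_sketch(HeapTupleHeader prior, HeapTupleHeader next)
{
	/* next's inserting xact must match prior's deleting/updating xact */
	return TransactionIdEquals(HeapTupleHeaderGetXmin(next),
							   HeapTupleHeaderGetRawXmax(prior));
}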
/*
 * HeapTupleHeaderGetRawCommandId will give you what's in the header whether
 * it is useful or not.  Most code should use HeapTupleHeaderGetCmin or
 * HeapTupleHeaderGetCmax instead, but note that those Assert that you can
 * get a legitimate result, ie you are in the originating transaction!
 */
#define HeapTupleHeaderGetRawCommandId(tup) \
( \
	(tup)->t_choice.t_heap.t_field3.t_cid \
)

/* SetCmin is reasonably simple since we never need a combo CID */
#define HeapTupleHeaderSetCmin(tup, cid) \
do { \
	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
	(tup)->t_infomask &= ~HEAP_COMBOCID; \
} while (0)

/* SetCmax must be used after HeapTupleHeaderAdjustCmax; see combocid.c */
#define HeapTupleHeaderSetCmax(tup, cid, iscombo) \
do { \
	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
	if (iscombo) \
		(tup)->t_infomask |= HEAP_COMBOCID; \
	else \
		(tup)->t_infomask &= ~HEAP_COMBOCID; \
} while (0)
#define HeapTupleHeaderGetXvac(tup) \
( \
	((tup)->t_infomask & HEAP_MOVED) ? \
		(tup)->t_choice.t_heap.t_field3.t_xvac \
	: \
		InvalidTransactionId \
)

#define HeapTupleHeaderSetXvac(tup, xid) \
do { \
	Assert((tup)->t_infomask & HEAP_MOVED); \
	(tup)->t_choice.t_heap.t_field3.t_xvac = (xid); \
} while (0)

#define HeapTupleHeaderIsSpeculative(tup) \
( \
	(ItemPointerGetOffsetNumberNoCheck(&(tup)->t_ctid) == SpecTokenOffsetNumber) \
)

#define HeapTupleHeaderGetSpeculativeToken(tup) \
( \
	AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
	ItemPointerGetBlockNumber(&(tup)->t_ctid) \
)

#define HeapTupleHeaderSetSpeculativeToken(tup, token) \
( \
	ItemPointerSet(&(tup)->t_ctid, token, SpecTokenOffsetNumber) \
)

#define HeapTupleHeaderIndicatesMovedPartitions(tup) \
	ItemPointerIndicatesMovedPartitions(&(tup)->t_ctid)

#define HeapTupleHeaderSetMovedPartitions(tup) \
	ItemPointerSetMovedPartitions(&(tup)->t_ctid)
#define HeapTupleHeaderGetDatumLength(tup) \
	VARSIZE(tup)

#define HeapTupleHeaderSetDatumLength(tup, len) \
	SET_VARSIZE(tup, len)

#define HeapTupleHeaderGetTypeId(tup) \
( \
	(tup)->t_choice.t_datum.datum_typeid \
)

#define HeapTupleHeaderSetTypeId(tup, typeid) \
( \
	(tup)->t_choice.t_datum.datum_typeid = (typeid) \
)

#define HeapTupleHeaderGetTypMod(tup) \
( \
	(tup)->t_choice.t_datum.datum_typmod \
)

#define HeapTupleHeaderSetTypMod(tup, typmod) \
( \
	(tup)->t_choice.t_datum.datum_typmod = (typmod) \
)
/*
 * Note that we stop considering a tuple HOT-updated as soon as it is known
 * aborted or the would-be updating transaction is known aborted.  For best
 * efficiency, check tuple visibility before using this macro, so that the
 * INVALID bits will be as up to date as possible.
 */
#define HeapTupleHeaderIsHotUpdated(tup) \
( \
	((tup)->t_infomask2 & HEAP_HOT_UPDATED) != 0 && \
	((tup)->t_infomask & HEAP_XMAX_INVALID) == 0 && \
	!HeapTupleHeaderXminInvalid(tup) \
)

#define HeapTupleHeaderSetHotUpdated(tup) \
( \
	(tup)->t_infomask2 |= HEAP_HOT_UPDATED \
)

#define HeapTupleHeaderClearHotUpdated(tup) \
( \
	(tup)->t_infomask2 &= ~HEAP_HOT_UPDATED \
)

#define HeapTupleHeaderIsHeapOnly(tup) \
( \
	((tup)->t_infomask2 & HEAP_ONLY_TUPLE) != 0 \
)

#define HeapTupleHeaderSetHeapOnly(tup) \
( \
	(tup)->t_infomask2 |= HEAP_ONLY_TUPLE \
)

#define HeapTupleHeaderClearHeapOnly(tup) \
( \
	(tup)->t_infomask2 &= ~HEAP_ONLY_TUPLE \
)

#define HeapTupleHeaderHasMatch(tup) \
( \
	((tup)->t_infomask2 & HEAP_TUPLE_HAS_MATCH) != 0 \
)

#define HeapTupleHeaderSetMatch(tup) \
( \
	(tup)->t_infomask2 |= HEAP_TUPLE_HAS_MATCH \
)

#define HeapTupleHeaderClearMatch(tup) \
( \
	(tup)->t_infomask2 &= ~HEAP_TUPLE_HAS_MATCH \
)

#define HeapTupleHeaderGetNatts(tup) \
	((tup)->t_infomask2 & HEAP_NATTS_MASK)

#define HeapTupleHeaderSetNatts(tup, natts) \
( \
	(tup)->t_infomask2 = ((tup)->t_infomask2 & ~HEAP_NATTS_MASK) | (natts) \
)

#define HeapTupleHeaderHasExternal(tup) \
		(((tup)->t_infomask & HEAP_HASEXTERNAL) != 0)
/*
 * BITMAPLEN(NATTS) -
 *		Computes size of null bitmap given number of data columns.
 */
#define BITMAPLEN(NATTS)	(((int)(NATTS) + 7) / 8)
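
/*
 * Illustrative sketch (not part of the upstream header): t_hoff for a tuple
 * with "natts" user columns follows the layout rules described above: the
 * fixed header, an optional nulls bitmap, then MAXALIGN padding.  The
 * function name is hypothetical, and the old-style OID field is ignored.
 */
static inline uint8
heap_compute_hoff_sketch(int natts, bool hasnull)
{
	Size		hoff = SizeofHeapTupleHeader;

	if (hasnull)
		hoff += BITMAPLEN(natts);

	/* natts is assumed to be within MaxTupleAttributeNumber, so this fits */
	return (uint8) MAXALIGN(hoff);
}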
/*
 * MaxHeapTupleSize is the maximum allowed size of a heap tuple, including
 * header and MAXALIGN alignment padding.  Basically it's BLCKSZ minus the
 * other stuff that has to be on a disk page.  Since heap pages use no
 * "special space", there's no deduction for that.
 *
 * NOTE: we allow for the ItemId that must point to the tuple, ensuring that
 * an otherwise-empty page can indeed hold a tuple of this size.  Because
 * ItemIds and tuples have different alignment requirements, don't assume that
 * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page.
 */
#define MaxHeapTupleSize  (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)))
#define MinHeapTupleSize  MAXALIGN(SizeofHeapTupleHeader)
/*
 * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
 * fit on one heap page.  (Note that indexes could have more, because they
 * use a smaller tuple header.)  We arrive at the divisor because each tuple
 * must be maxaligned, and it must have an associated line pointer.
 *
 * Note: with HOT, there could theoretically be more line pointers (not actual
 * tuples) than this on a heap page.  However we constrain the number of line
 * pointers to this anyway, to avoid excessive line-pointer bloat and not
 * require increases in the size of work arrays.
 */
#define MaxHeapTuplesPerPage	\
	((int) ((BLCKSZ - SizeOfPageHeaderData) / \
			(MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))
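
/*
 * Worked example (not part of the upstream header): with the default 8 kB
 * BLCKSZ and 8-byte MAXALIGN, this evaluates to
 * (8192 - 24) / (MAXALIGN(23) + 4) = 8168 / 28 = 291 tuples per page.
 */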
/*
 * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
 * data fields of char(n) and similar types.  It need not have anything
 * directly to do with the *actual* upper limit of varlena values, which
 * is currently 1Gb (see TOAST structures in postgres.h).  I've set it
 * at 10Mb which seems like a reasonable number --- tgl 8/6/00.
 */
#define MaxAttrSize		(10 * 1024 * 1024)
/*
 * MinimalTuple is an alternative representation that is used for transient
 * tuples inside the executor, in places where transaction status information
 * is not required, the tuple rowtype is known, and shaving off a few bytes
 * is worthwhile because we need to store many tuples.  The representation
 * is chosen so that tuple access routines can work with either full or
 * minimal tuples via a HeapTupleData pointer structure.  The access routines
 * see no difference, except that they must not access the transaction status
 * or t_ctid fields because those aren't there.
 *
 * For the most part, MinimalTuples should be accessed via TupleTableSlot
 * routines.  These routines will prevent access to the "system columns"
 * and thereby prevent accidental use of the nonexistent fields.
 *
 * MinimalTupleData contains a length word, some padding, and fields matching
 * HeapTupleHeaderData beginning with t_infomask2.  The padding is chosen so
 * that offsetof(t_infomask2) is the same modulo MAXIMUM_ALIGNOF in both
 * structs.  This makes data alignment rules equivalent in both cases.
 *
 * When a minimal tuple is accessed via a HeapTupleData pointer, t_data is
 * set to point MINIMAL_TUPLE_OFFSET bytes before the actual start of the
 * minimal tuple --- that is, where a full tuple matching the minimal tuple's
 * data would start.  This trick is what makes the structs seem equivalent.
 *
 * Note that t_hoff is computed the same as in a full tuple, hence it includes
 * the MINIMAL_TUPLE_OFFSET distance.  t_len does not include that, however.
 *
 * MINIMAL_TUPLE_DATA_OFFSET is the offset to the first useful (non-pad) data
 * other than the length word.  tuplesort.c and tuplestore.c use this to avoid
 * writing the padding to disk.
 */
#define MINIMAL_TUPLE_OFFSET \
	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) / MAXIMUM_ALIGNOF * MAXIMUM_ALIGNOF)
#define MINIMAL_TUPLE_PADDING \
	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) % MAXIMUM_ALIGNOF)
#define MINIMAL_TUPLE_DATA_OFFSET \
	offsetof(MinimalTupleData, t_infomask2)
struct MinimalTupleData
{
	uint32		t_len;			/* actual length of minimal tuple */

	char		mt_padding[MINIMAL_TUPLE_PADDING];

	/* Fields below here must match HeapTupleHeaderData! */

	uint16		t_infomask2;	/* number of attributes + various flags */

	uint16		t_infomask;		/* various flag bits, see below */

	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */

	/* ^ - 23 bytes - ^ */

	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */

	/* MORE DATA FOLLOWS AT END OF STRUCT */
};

/* typedef appears in htup.h */

#define SizeofMinimalTupleHeader offsetof(MinimalTupleData, t_bits)
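
/*
 * Illustrative sketch (not part of the upstream header): to read a
 * MinimalTuple through the regular HeapTupleData interface, point t_data
 * MINIMAL_TUPLE_OFFSET bytes before the minimal tuple, exactly as described
 * above.  The function name is hypothetical; t_self and t_tableOid are left
 * untouched because a minimal tuple has no meaningful values for them, and
 * the resulting tuple must not be used to access system columns or t_ctid.
 */
static inline void
heap_tuple_overlay_minimal_sketch(HeapTupleData *htup, MinimalTuple mtup)
{
	htup->t_len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
	htup->t_data = (HeapTupleHeader) ((char *) mtup - MINIMAL_TUPLE_OFFSET);
}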
/*
 * GETSTRUCT - given a HeapTuple pointer, return address of the user data
 */
#define GETSTRUCT(TUP) ((char *) ((TUP)->t_data) + (TUP)->t_data->t_hoff)
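
/*
 * Typical usage of GETSTRUCT (illustration only), assuming the relevant
 * catalog header -- here catalog/pg_class.h -- is included by the caller:
 *
 *		Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
 */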
/*
 * Accessor macros to be used with HeapTuple pointers.
 */
#define HeapTupleHasNulls(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASNULL) != 0)

#define HeapTupleNoNulls(tuple) \
		(!((tuple)->t_data->t_infomask & HEAP_HASNULL))

#define HeapTupleHasVarWidth(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH) != 0)

#define HeapTupleAllFixed(tuple) \
		(!((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH))

#define HeapTupleHasExternal(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASEXTERNAL) != 0)

#define HeapTupleIsHotUpdated(tuple) \
		HeapTupleHeaderIsHotUpdated((tuple)->t_data)

#define HeapTupleSetHotUpdated(tuple) \
		HeapTupleHeaderSetHotUpdated((tuple)->t_data)

#define HeapTupleClearHotUpdated(tuple) \
		HeapTupleHeaderClearHotUpdated((tuple)->t_data)

#define HeapTupleIsHeapOnly(tuple) \
		HeapTupleHeaderIsHeapOnly((tuple)->t_data)

#define HeapTupleSetHeapOnly(tuple) \
		HeapTupleHeaderSetHeapOnly((tuple)->t_data)

#define HeapTupleClearHeapOnly(tuple) \
		HeapTupleHeaderClearHeapOnly((tuple)->t_data)
/* ----------------
 *		fastgetattr
 *
 *		Fetch a user attribute's value as a Datum (might be either a
 *		value, or a pointer into the data area of the tuple).
 *
 *		This must not be used when a system attribute might be requested.
 *		Furthermore, the passed attnum MUST be valid.  Use heap_getattr()
 *		instead, if in doubt.
 *
 *		This gets called many times, so we macro the cacheable and NULL
 *		lookups, and call nocachegetattr() for the rest.
 * ----------------
 */

#if !defined(DISABLE_COMPLEX_MACRO)

#define fastgetattr(tup, attnum, tupleDesc, isnull) \
( \
	AssertMacro((attnum) > 0), \
	(*(isnull) = false), \
	HeapTupleNoNulls(tup) ? \
	( \
		TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 ? \
		( \
			fetchatt(TupleDescAttr((tupleDesc), (attnum)-1), \
					 (char *) (tup)->t_data + (tup)->t_data->t_hoff + \
					 TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff) \
		) \
		: \
			nocachegetattr((tup), (attnum), (tupleDesc)) \
	) \
	: \
	( \
		att_isnull((attnum)-1, (tup)->t_data->t_bits) ? \
		( \
			(*(isnull) = true), \
			(Datum) NULL \
		) \
		: \
		( \
			nocachegetattr((tup), (attnum), (tupleDesc)) \
		) \
	) \
)

#else							/* defined(DISABLE_COMPLEX_MACRO) */

extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
						 bool *isnull);
#endif							/* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------
 *		heap_getattr
 *
 *		Extract an attribute of a heap tuple and return it as a Datum.
 *		This works for either system or user attributes.  The given attnum
 *		is properly range-checked.
 *
 *		If the field in question has a NULL value, we return a zero Datum
 *		and set *isnull == true.  Otherwise, we set *isnull == false.
 *
 *		<tup> is the pointer to the heap tuple.  <attnum> is the attribute
 *		number of the column (field) caller wants.  <tupleDesc> is a
 *		pointer to the structure describing the row and all its fields.
 * ----------------
 */
#define heap_getattr(tup, attnum, tupleDesc, isnull) \
	( \
		((attnum) > 0) ? \
		( \
			((attnum) > (int) HeapTupleHeaderGetNatts((tup)->t_data)) ? \
				getmissingattr((tupleDesc), (attnum), (isnull)) \
			: \
				fastgetattr((tup), (attnum), (tupleDesc), (isnull)) \
		) \
		: \
			heap_getsysattr((tup), (attnum), (tupleDesc), (isnull)) \
	)
/* prototypes for functions in common/heaptuple.c */
extern Size heap_compute_data_size(TupleDesc tupleDesc,
								   Datum *values, bool *isnull);
extern void heap_fill_tuple(TupleDesc tupleDesc,
							Datum *values, bool *isnull,
							char *data, Size data_size,
							uint16 *infomask, bits8 *bit);
extern bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc);
extern Datum nocachegetattr(HeapTuple tup, int attnum,
							TupleDesc tupleDesc);
extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
							 bool *isnull);
extern Datum getmissingattr(TupleDesc tupleDesc,
							int attnum, bool *isnull);
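
/*
 * Illustrative sketch (not part of the upstream header): typical use of
 * heap_getattr(), fetching user column "attnum" and handling NULLs.  The
 * function name is hypothetical, and the column is assumed to be of a
 * pass-by-value int32 type; real callers must consult the TupleDesc.
 */
static inline int32
heap_getattr_int32_sketch(HeapTuple tup, int attnum, TupleDesc tupleDesc)
{
	bool		isnull;
	Datum		value;

	value = heap_getattr(tup, attnum, tupleDesc, &isnull);
	return isnull ? 0 : DatumGetInt32(value);
}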
extern HeapTuple heap_copytuple(HeapTuple tuple);
extern void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest);
extern Datum heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc);
extern HeapTuple heap_form_tuple(TupleDesc tupleDescriptor,
								 Datum *values, bool *isnull);
extern HeapTuple heap_modify_tuple(HeapTuple tuple,
								   TupleDesc tupleDesc,
								   Datum *replValues,
								   bool *replIsnull,
								   bool *doReplace);
extern HeapTuple heap_modify_tuple_by_cols(HeapTuple tuple,
										   TupleDesc tupleDesc,
										   int nCols,
										   int *replCols,
										   Datum *replValues,
										   bool *replIsnull);
extern void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
							  Datum *values, bool *isnull);
extern void heap_freetuple(HeapTuple htup);
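
/*
 * Illustrative sketch (not part of the upstream header): building a tuple
 * from Datum/isnull arrays with heap_form_tuple(), pulling it apart again
 * with heap_deform_tuple(), and releasing it.  The function name is
 * hypothetical; "values" and "isnull" must have tupleDesc->natts entries.
 */
static inline void
heap_form_and_deform_sketch(TupleDesc tupleDesc, Datum *values, bool *isnull)
{
	HeapTuple	tuple = heap_form_tuple(tupleDesc, values, isnull);

	/* overwrite the caller's arrays with the values stored in the tuple */
	heap_deform_tuple(tuple, tupleDesc, values, isnull);
	heap_freetuple(tuple);
}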
extern MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor,
											Datum *values, bool *isnull);
extern void heap_free_minimal_tuple(MinimalTuple mtup);
extern MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup);
extern HeapTuple heap_tuple_from_minimal_tuple(MinimalTuple mtup);
extern MinimalTuple minimal_tuple_from_heap_tuple(HeapTuple htup);
extern size_t varsize_any(void *p);
extern HeapTuple heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc);
extern MinimalTuple minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc);
#endif							/* HTUP_DETAILS_H */