Standardize rmgrdesc recovery conflict XID output.
[pgsql.git] / src / include / access / heapam_xlog.h
blob5c77290eecff70f087381cba02887c2784f436bf
/*-------------------------------------------------------------------------
 *
 * heapam_xlog.h
 *	  POSTGRES heap access XLOG definitions.
 *
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/heapam_xlog.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HEAPAM_XLOG_H
#define HEAPAM_XLOG_H

#include "access/htup.h"
#include "access/xlogreader.h"
#include "lib/stringinfo.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
/*
 * WAL record definitions for heapam.c's WAL operations
 *
 * XLOG allows to store some information in high 4 bits of log
 * record xl_info field.  We use 3 for opcode and one for init bit.
 */
#define XLOG_HEAP_INSERT		0x00
#define XLOG_HEAP_DELETE		0x10
#define XLOG_HEAP_UPDATE		0x20
#define XLOG_HEAP_TRUNCATE		0x30
#define XLOG_HEAP_HOT_UPDATE	0x40
#define XLOG_HEAP_CONFIRM		0x50
#define XLOG_HEAP_LOCK			0x60
#define XLOG_HEAP_INPLACE		0x70

/* mask to extract the 3-bit opcode from xl_info's high nibble */
#define XLOG_HEAP_OPMASK		0x70

/*
 * When we insert 1st item on new page in INSERT, UPDATE, HOT_UPDATE,
 * or MULTI_INSERT, we can (and we do) restore entire page in redo
 */
#define XLOG_HEAP_INIT_PAGE		0x80
/*
 * We ran out of opcodes, so heapam.c now has a second RmgrId.  These opcodes
 * are associated with RM_HEAP2_ID, but are not logically different from
 * the ones above associated with RM_HEAP_ID.  XLOG_HEAP_OPMASK applies to
 * these, too.
 */
#define XLOG_HEAP2_REWRITE		0x00
#define XLOG_HEAP2_PRUNE		0x10
#define XLOG_HEAP2_VACUUM		0x20
#define XLOG_HEAP2_FREEZE_PAGE	0x30
#define XLOG_HEAP2_VISIBLE		0x40
#define XLOG_HEAP2_MULTI_INSERT 0x50
#define XLOG_HEAP2_LOCK_UPDATED 0x60
#define XLOG_HEAP2_NEW_CID		0x70
/*
 * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_INSERT_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_INSERT_LAST_IN_MULTI				(1<<1)
#define XLH_INSERT_IS_SPECULATIVE				(1<<2)
#define XLH_INSERT_CONTAINS_NEW_TUPLE			(1<<3)
#define XLH_INSERT_ON_TOAST_RELATION			(1<<4)

/* all_frozen_set always implies all_visible_set */
#define XLH_INSERT_ALL_FROZEN_SET				(1<<5)
/*
 * xl_heap_update flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED		(1<<0)
/* PD_ALL_VISIBLE was cleared in the 2nd page */
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED		(1<<1)
#define XLH_UPDATE_CONTAINS_OLD_TUPLE			(1<<2)
#define XLH_UPDATE_CONTAINS_OLD_KEY				(1<<3)
#define XLH_UPDATE_CONTAINS_NEW_TUPLE			(1<<4)
#define XLH_UPDATE_PREFIX_FROM_OLD				(1<<5)
#define XLH_UPDATE_SUFFIX_FROM_OLD				(1<<6)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_UPDATE_CONTAINS_OLD						\
	(XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY)
/*
 * xl_heap_delete flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_DELETE_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_DELETE_CONTAINS_OLD_TUPLE			(1<<1)
#define XLH_DELETE_CONTAINS_OLD_KEY				(1<<2)
#define XLH_DELETE_IS_SUPER						(1<<3)
#define XLH_DELETE_IS_PARTITION_MOVE			(1<<4)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_DELETE_CONTAINS_OLD						\
	(XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY)
106 /* This is what we need to know about delete */
107 typedef struct xl_heap_delete
109 TransactionId xmax; /* xmax of the deleted tuple */
110 OffsetNumber offnum; /* deleted tuple's offset */
111 uint8 infobits_set; /* infomask bits */
112 uint8 flags;
113 } xl_heap_delete;
115 #define SizeOfHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8))
/*
 * xl_heap_truncate flag values, 8 bits are available.
 */
#define XLH_TRUNCATE_CASCADE					(1<<0)
#define XLH_TRUNCATE_RESTART_SEQS				(1<<1)
124 * For truncate we list all truncated relids in an array, followed by all
125 * sequence relids that need to be restarted, if any.
126 * All rels are always within the same database, so we just list dbid once.
128 typedef struct xl_heap_truncate
130 Oid dbId;
131 uint32 nrelids;
132 uint8 flags;
133 Oid relids[FLEXIBLE_ARRAY_MEMBER];
134 } xl_heap_truncate;
136 #define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids))
139 * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted
140 * or updated tuple in WAL; we can save a few bytes by reconstructing the
141 * fields that are available elsewhere in the WAL record, or perhaps just
142 * plain needn't be reconstructed. These are the fields we must store.
144 typedef struct xl_heap_header
146 uint16 t_infomask2;
147 uint16 t_infomask;
148 uint8 t_hoff;
149 } xl_heap_header;
151 #define SizeOfHeapHeader (offsetof(xl_heap_header, t_hoff) + sizeof(uint8))
153 /* This is what we need to know about insert */
154 typedef struct xl_heap_insert
156 OffsetNumber offnum; /* inserted tuple's offset */
157 uint8 flags;
159 /* xl_heap_header & TUPLE DATA in backup block 0 */
160 } xl_heap_insert;
162 #define SizeOfHeapInsert (offsetof(xl_heap_insert, flags) + sizeof(uint8))
165 * This is what we need to know about a multi-insert.
167 * The main data of the record consists of this xl_heap_multi_insert header.
168 * 'offsets' array is omitted if the whole page is reinitialized
169 * (XLOG_HEAP_INIT_PAGE).
171 * In block 0's data portion, there is an xl_multi_insert_tuple struct,
172 * followed by the tuple data for each tuple. There is padding to align
173 * each xl_multi_insert_tuple struct.
175 typedef struct xl_heap_multi_insert
177 uint8 flags;
178 uint16 ntuples;
179 OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];
180 } xl_heap_multi_insert;
182 #define SizeOfHeapMultiInsert offsetof(xl_heap_multi_insert, offsets)
184 typedef struct xl_multi_insert_tuple
186 uint16 datalen; /* size of tuple data that follows */
187 uint16 t_infomask2;
188 uint16 t_infomask;
189 uint8 t_hoff;
190 /* TUPLE DATA FOLLOWS AT END OF STRUCT */
191 } xl_multi_insert_tuple;
193 #define SizeOfMultiInsertTuple (offsetof(xl_multi_insert_tuple, t_hoff) + sizeof(uint8))
196 * This is what we need to know about update|hot_update
198 * Backup blk 0: new page
200 * If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set,
201 * the prefix and/or suffix come first, as one or two uint16s.
203 * After that, xl_heap_header and new tuple data follow. The new tuple
204 * data doesn't include the prefix and suffix, which are copied from the
205 * old tuple on replay.
207 * If XLH_UPDATE_CONTAINS_NEW_TUPLE flag is given, the tuple data is
208 * included even if a full-page image was taken.
210 * Backup blk 1: old page, if different. (no data, just a reference to the blk)
212 typedef struct xl_heap_update
214 TransactionId old_xmax; /* xmax of the old tuple */
215 OffsetNumber old_offnum; /* old tuple's offset */
216 uint8 old_infobits_set; /* infomask bits to set on old tuple */
217 uint8 flags;
218 TransactionId new_xmax; /* xmax of the new tuple */
219 OffsetNumber new_offnum; /* new tuple's offset */
222 * If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags
223 * are set, xl_heap_header and tuple data for the old tuple follow.
225 } xl_heap_update;
227 #define SizeOfHeapUpdate (offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber))
230 * This is what we need to know about page pruning (both during VACUUM and
231 * during opportunistic pruning)
233 * The array of OffsetNumbers following the fixed part of the record contains:
234 * * for each redirected item: the item offset, then the offset redirected to
235 * * for each now-dead item: the item offset
236 * * for each now-unused item: the item offset
237 * The total number of OffsetNumbers is therefore 2*nredirected+ndead+nunused.
238 * Note that nunused is not explicitly stored, but may be found by reference
239 * to the total record length.
241 * Acquires a full cleanup lock.
243 typedef struct xl_heap_prune
245 TransactionId snapshotConflictHorizon;
246 uint16 nredirected;
247 uint16 ndead;
248 /* OFFSET NUMBERS are in the block reference 0 */
249 } xl_heap_prune;
251 #define SizeOfHeapPrune (offsetof(xl_heap_prune, ndead) + sizeof(uint16))
254 * The vacuum page record is similar to the prune record, but can only mark
255 * already LP_DEAD items LP_UNUSED (during VACUUM's second heap pass)
257 * Acquires an ordinary exclusive lock only.
259 typedef struct xl_heap_vacuum
261 uint16 nunused;
262 /* OFFSET NUMBERS are in the block reference 0 */
263 } xl_heap_vacuum;
265 #define SizeOfHeapVacuum (offsetof(xl_heap_vacuum, nunused) + sizeof(uint16))
/* flags for infobits_set */
#define XLHL_XMAX_IS_MULTI		0x01
#define XLHL_XMAX_LOCK_ONLY		0x02
#define XLHL_XMAX_EXCL_LOCK		0x04
#define XLHL_XMAX_KEYSHR_LOCK	0x08
#define XLHL_KEYS_UPDATED		0x10

/* flag bits for xl_heap_lock / xl_heap_lock_updated's flag field */
#define XLH_LOCK_ALL_FROZEN_CLEARED		0x01
277 /* This is what we need to know about lock */
278 typedef struct xl_heap_lock
280 TransactionId locking_xid; /* might be a MultiXactId not xid */
281 OffsetNumber offnum; /* locked tuple's offset on page */
282 int8 infobits_set; /* infomask and infomask2 bits to set */
283 uint8 flags; /* XLH_LOCK_* flag bits */
284 } xl_heap_lock;
286 #define SizeOfHeapLock (offsetof(xl_heap_lock, flags) + sizeof(int8))
288 /* This is what we need to know about locking an updated version of a row */
289 typedef struct xl_heap_lock_updated
291 TransactionId xmax;
292 OffsetNumber offnum;
293 uint8 infobits_set;
294 uint8 flags;
295 } xl_heap_lock_updated;
297 #define SizeOfHeapLockUpdated (offsetof(xl_heap_lock_updated, flags) + sizeof(uint8))
299 /* This is what we need to know about confirmation of speculative insertion */
300 typedef struct xl_heap_confirm
302 OffsetNumber offnum; /* confirmed tuple's offset on page */
303 } xl_heap_confirm;
305 #define SizeOfHeapConfirm (offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber))
307 /* This is what we need to know about in-place update */
308 typedef struct xl_heap_inplace
310 OffsetNumber offnum; /* updated tuple's offset on page */
311 /* TUPLE DATA FOLLOWS AT END OF STRUCT */
312 } xl_heap_inplace;
314 #define SizeOfHeapInplace (offsetof(xl_heap_inplace, offnum) + sizeof(OffsetNumber))
/*
 * This struct represents a 'freeze plan', which describes how to freeze a
 * group of one or more heap tuples (appears in xl_heap_freeze_page record)
 */
/* 0x01 was XLH_FREEZE_XMIN */
#define XLH_FREEZE_XVAC		0x02
#define XLH_INVALID_XVAC	0x04
324 typedef struct xl_heap_freeze_plan
326 TransactionId xmax;
327 uint16 t_infomask2;
328 uint16 t_infomask;
329 uint8 frzflags;
331 /* Length of individual page offset numbers array for this plan */
332 uint16 ntuples;
333 } xl_heap_freeze_plan;
336 * This is what we need to know about a block being frozen during vacuum
338 * Backup block 0's data contains an array of xl_heap_freeze_plan structs
339 * (with nplans elements), followed by one or more page offset number arrays.
340 * Each such page offset number array corresponds to a single freeze plan
341 * (REDO routine freezes corresponding heap tuples using freeze plan).
343 typedef struct xl_heap_freeze_page
345 TransactionId snapshotConflictHorizon;
346 uint16 nplans;
348 /* FREEZE PLANS FOLLOW */
349 /* OFFSET NUMBER ARRAY FOLLOWS */
350 } xl_heap_freeze_page;
352 #define SizeOfHeapFreezePage (offsetof(xl_heap_freeze_page, nplans) + sizeof(uint16))
355 * This is what we need to know about setting a visibility map bit
357 * Backup blk 0: visibility map buffer
358 * Backup blk 1: heap buffer
360 typedef struct xl_heap_visible
362 TransactionId snapshotConflictHorizon;
363 uint8 flags;
364 } xl_heap_visible;
366 #define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8))
368 typedef struct xl_heap_new_cid
371 * store toplevel xid so we don't have to merge cids from different
372 * transactions
374 TransactionId top_xid;
375 CommandId cmin;
376 CommandId cmax;
377 CommandId combocid; /* just for debugging */
380 * Store the relfilelocator/ctid pair to facilitate lookups.
382 RelFileLocator target_locator;
383 ItemPointerData target_tid;
384 } xl_heap_new_cid;
386 #define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target_tid) + sizeof(ItemPointerData))
388 /* logical rewrite xlog record header */
389 typedef struct xl_heap_rewrite_mapping
391 TransactionId mapped_xid; /* xid that might need to see the row */
392 Oid mapped_db; /* DbOid or InvalidOid for shared rels */
393 Oid mapped_rel; /* Oid of the mapped relation */
394 off_t offset; /* How far have we written so far */
395 uint32 num_mappings; /* Number of in-memory mappings */
396 XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
397 } xl_heap_rewrite_mapping;
399 extern void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
400 TransactionId *snapshotConflictHorizon);
402 extern void heap_redo(XLogReaderState *record);
403 extern void heap_desc(StringInfo buf, XLogReaderState *record);
404 extern const char *heap_identify(uint8 info);
405 extern void heap_mask(char *pagedata, BlockNumber blkno);
406 extern void heap2_redo(XLogReaderState *record);
407 extern void heap2_desc(StringInfo buf, XLogReaderState *record);
408 extern const char *heap2_identify(uint8 info);
409 extern void heap_xlog_logical_rewrite(XLogReaderState *r);
411 extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer,
412 Buffer vm_buffer,
413 TransactionId snapshotConflictHorizon,
414 uint8 vmflags);
#endif							/* HEAPAM_XLOG_H */