/*-------------------------------------------------------------------------
 *
 *	  heap access method code
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * INTERFACE ROUTINES
 *		relation_open	- open any relation by relation OID
 *		relation_openrv - open any relation specified by a RangeVar
 *		relation_close	- close any relation
 *		heap_open		- open a heap relation by relation OID
 *		heap_openrv		- open a heap relation specified by a RangeVar
 *		heap_close		- (now just a macro for relation_close)
 *		heap_beginscan	- begin relation scan
 *		heap_rescan		- restart a relation scan
 *		heap_endscan	- end relation scan
 *		heap_getnext	- retrieve next tuple in scan
 *		heap_fetch		- retrieve tuple with given tid
 *		heap_insert		- insert tuple into a relation
 *		heap_delete		- delete a tuple from a relation
 *		heap_update		- replace a tuple in a relation with another tuple
 *		heap_markpos	- mark scan position
 *		heap_restrpos	- restore position to marked location
 *		heap_sync		- sync heap, for when no WAL has been written
 *
 * NOTES
 *	  This file contains the heap_ routines which implement
 *	  the POSTGRES heap access method used for all POSTGRES relations.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/xact.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
bool		synchronize_seqscans = true;


static HeapScanDesc heap_beginscan_internal(Relation relation,
						Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
				ItemPointerData from, Buffer newbuf, HeapTuple newtup,
				bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
				HeapTuple oldtup, HeapTuple newtup);
/* ----------------------------------------------------------------
 *						 heap support routines
 * ----------------------------------------------------------------
 */

/*
 *		initscan - scan code common to heap_beginscan and heap_rescan
 */
static void
initscan(HeapScanDesc scan, ScanKey key)
{
	bool		allow_strat;
	bool		allow_sync;

	/*
	 * Determine the number of blocks we have to scan.
	 *
	 * It is sufficient to do this once at scan start, since any tuples added
	 * while the scan is in progress will be invisible to my snapshot anyway.
	 * (That is not true when using a non-MVCC snapshot.  However, we couldn't
	 * guarantee to return tuples added after scan start anyway, since they
	 * might go into pages we already scanned.  To guarantee consistent
	 * results for a non-MVCC snapshot, the caller must hold some higher-level
	 * lock that ensures the interesting tuple(s) won't change.)
	 */
	scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);

	/*
	 * If the table is large relative to NBuffers, use a bulk-read access
	 * strategy and enable synchronized scanning (see syncscan.c).  Although
	 * the thresholds for these features could be different, we make them the
	 * same so that there are only two behaviors to tune rather than four.
	 * (However, some callers need to be able to disable one or both of
	 * these behaviors, independently of the size of the table; also there
	 * is a GUC variable that can disable synchronized scanning.)
	 *
	 * During a rescan, don't make a new strategy object if we don't have to.
	 */
	if (!scan->rs_rd->rd_istemp &&
		scan->rs_nblocks > NBuffers / 4)
	{
		allow_strat = scan->rs_allow_strat;
		allow_sync = scan->rs_allow_sync;
	}
	else
		allow_strat = allow_sync = false;

	if (allow_strat)
	{
		if (scan->rs_strategy == NULL)
			scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
	}
	else
	{
		if (scan->rs_strategy != NULL)
			FreeAccessStrategy(scan->rs_strategy);
		scan->rs_strategy = NULL;
	}

	if (allow_sync && synchronize_seqscans)
	{
		scan->rs_syncscan = true;
		scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
	}
	else
	{
		scan->rs_syncscan = false;
		scan->rs_startblock = 0;
	}

	scan->rs_inited = false;
	scan->rs_ctup.t_data = NULL;
	ItemPointerSetInvalid(&scan->rs_ctup.t_self);
	scan->rs_cbuf = InvalidBuffer;
	scan->rs_cblock = InvalidBlockNumber;

	/* we don't have a marked position... */
	ItemPointerSetInvalid(&(scan->rs_mctid));

	/* page-at-a-time fields are always invalid when not rs_inited */

	/*
	 * copy the scan key, if appropriate
	 */
	if (key != NULL)
		memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));

	/*
	 * Currently, we don't have a stats counter for bitmap heap scans (but the
	 * underlying bitmap index scans will be counted).
	 */
	if (!scan->rs_bitmapscan)
		pgstat_count_heap_scan(scan->rs_rd);
}
/*
 * heapgetpage - subroutine for heapgettup()
 *
 * This routine reads and pins the specified page of the relation.
 * In page-at-a-time mode it performs additional work, namely determining
 * which tuples on the page are visible.
 */
static void
heapgetpage(HeapScanDesc scan, BlockNumber page)
{
	Buffer		buffer;
	Snapshot	snapshot;
	Page		dp;
	int			lines;
	int			ntup;
	OffsetNumber lineoff;
	ItemId		lpp;

	Assert(page < scan->rs_nblocks);

	/* release previous scan buffer, if any */
	if (BufferIsValid(scan->rs_cbuf))
	{
		ReleaseBuffer(scan->rs_cbuf);
		scan->rs_cbuf = InvalidBuffer;
	}

	/* read page using selected strategy */
	scan->rs_cbuf = ReadBufferWithStrategy(scan->rs_rd, page,
										   scan->rs_strategy);
	scan->rs_cblock = page;

	if (!scan->rs_pageatatime)
		return;

	buffer = scan->rs_cbuf;
	snapshot = scan->rs_snapshot;

	/*
	 * Prune and repair fragmentation for the whole page, if possible.
	 */
	Assert(TransactionIdIsValid(RecentGlobalXmin));
	heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);

	/*
	 * We must hold share lock on the buffer content while examining tuple
	 * visibility.  Afterwards, however, the tuples we have found to be
	 * visible are guaranteed good as long as we hold the buffer pin.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	dp = (Page) BufferGetPage(buffer);
	lines = PageGetMaxOffsetNumber(dp);
	ntup = 0;

	for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
		 lineoff <= lines;
		 lineoff++, lpp++)
	{
		if (ItemIdIsNormal(lpp))
		{
			HeapTupleData loctup;
			bool		valid;

			loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			loctup.t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(loctup.t_self), page, lineoff);

			valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
			if (valid)
				scan->rs_vistuples[ntup++] = lineoff;
		}
	}

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	Assert(ntup <= MaxHeapTuplesPerPage);
	scan->rs_ntuples = ntup;
}
/*
 *		heapgettup - fetch next heap tuple
 *
 *		Initialize the scan if not already done; then advance to the next
 *		tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
 *		or set scan->rs_ctup.t_data = NULL if no more tuples.
 *
 *		dir == NoMovementScanDirection means "re-fetch the tuple indicated
 *		by scan->rs_ctup".
 *
 * Note: the reason nkeys/key are passed separately, even though they are
 * kept in the scan descriptor, is that the caller may not want us to check
 * the scankeys.
 *
 * Note: when we fall off the end of the scan in either direction, we
 * reset rs_inited.  This means that a further request with the same
 * scan direction will restart the scan, which is a bit odd, but a
 * request with the opposite scan direction will start a fresh scan
 * in the proper direction.  The latter is required behavior for cursors,
 * while the former case is generally undefined behavior in Postgres
 * so we don't care too much.
 */
static void
heapgettup(HeapScanDesc scan,
		   ScanDirection dir,
		   int nkeys,
		   ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	Snapshot	snapshot = scan->rs_snapshot;
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineoff, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineoff = FirstOffsetNumber;	/* first offnum */
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineoff =			/* next offnum */
				OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);
		/* page and lineoff now reference the physically next tid */

		linesleft = lines - lineoff + 1;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);

		if (!scan->rs_inited)
		{
			lineoff = lines;	/* final offnum */
			scan->rs_inited = true;
		}
		else
		{
			lineoff =			/* previous offnum */
				OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}
		/* page and lineoff now reference the physically previous tid */

		linesleft = lineoff;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	lpp = PageGetItemId(dp, lineoff);
	for (;;)
	{
		while (linesleft > 0)
		{
			if (ItemIdIsNormal(lpp))
			{
				bool		valid;

				tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
				tuple->t_len = ItemIdGetLength(lpp);
				ItemPointerSet(&(tuple->t_self), page, lineoff);

				/*
				 * if current tuple qualifies, return it.
				 */
				valid = HeapTupleSatisfiesVisibility(tuple,
													 snapshot,
													 scan->rs_cbuf);

				if (valid && key != NULL)
					HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
								nkeys, key, valid);

				if (valid)
				{
					LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
					return;
				}
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
			{
				--lpp;			/* move back in this page's ItemId array */
				--lineoff;
			}
			else
			{
				++lpp;			/* move forward in this page's ItemId array */
				++lineoff;
			}
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

		/*
		 * advance to next/prior page and detect end of scan
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel. That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber((Page) dp);
		linesleft = lines;
		if (backward)
		{
			lineoff = lines;
			lpp = PageGetItemId(dp, lines);
		}
		else
		{
			lineoff = FirstOffsetNumber;
			lpp = PageGetItemId(dp, FirstOffsetNumber);
		}
	}
}
/*
 *		heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *		Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heapgetpage), and we iterate through just the tuples listed
 * in rs_vistuples[] rather than all tuples on the page.  Notice that
 * lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
					ScanDirection dir,
					int nkeys,
					ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	int			lineindex;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineindex, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineindex = 0;
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineindex = scan->rs_cindex + 1;
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		/* page and lineindex now reference the next visible tid */

		linesleft = lines - lineindex;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;

		if (!scan->rs_inited)
		{
			lineindex = lines - 1;
			scan->rs_inited = true;
		}
		else
		{
			lineindex = scan->rs_cindex - 1;
		}
		/* page and lineindex now reference the previous visible tid */

		linesleft = lineindex + 1;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		/* check that rs_cindex is in sync */
		Assert(scan->rs_cindex < scan->rs_ntuples);
		Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	for (;;)
	{
		while (linesleft > 0)
		{
			lineoff = scan->rs_vistuples[lineindex];
			lpp = PageGetItemId(dp, lineoff);
			Assert(ItemIdIsNormal(lpp));

			tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			tuple->t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(tuple->t_self), page, lineoff);

			/*
			 * if current tuple qualifies, return it.
			 */
			if (key != NULL)
			{
				bool		valid;

				HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
							nkeys, key, valid);
				if (valid)
				{
					scan->rs_cindex = lineindex;
					return;
				}
			}
			else
			{
				scan->rs_cindex = lineindex;
				return;
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
				--lineindex;
			else
				++lineindex;
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel. That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		linesleft = lines;
		if (backward)
			lineindex = lines - 1;
		else
			lineindex = 0;
	}
}
#if defined(DISABLE_COMPLEX_MACRO)
/*
 * This is formatted so oddly so that the correspondence to the macro
 * definition in access/heapam.h is maintained.
 */
Datum
fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
			bool *isnull)
{
	return (
			(attnum) > 0 ?
			(
			 ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
			 HeapTupleNoNulls(tup) ?
			 (
			  (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
			  (
			   fetchatt((tupleDesc)->attrs[(attnum) - 1],
						(char *) (tup)->t_data + (tup)->t_data->t_hoff +
						(tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
			   )
			  :
			  nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			  )
			 :
			 (
			  att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
			  (
			   ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
			   (Datum) NULL
			   )
			  :
			  (
			   nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			   )
			  )
			 )
			:
			(
			 (Datum) NULL
			 )
		);
}
#endif   /* defined(DISABLE_COMPLEX_MACRO) */
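/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * pulling a user column out of a heap tuple with fastgetattr.  Attribute
 * numbers are 1-based for user columns; system columns must be fetched with
 * heap_getsysattr instead.
 */
#ifdef NOT_USED
static Datum
example_get_first_column(HeapTuple tup, TupleDesc tupdesc, bool *isnull)
{
	/* fetch attribute number 1, setting *isnull appropriately */
	return fastgetattr(tup, 1, tupdesc, isnull);
}
#endif   /* NOT_USED */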
/* ----------------------------------------------------------------
 *					 heap access method interface
 * ----------------------------------------------------------------
 */

/*
 *		relation_open - open any relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the relation.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		relation already.)
 *
 *		An error is raised if the relation does not exist.
 *
 *		NB: a "relation" is anything with a pg_class entry.  The caller is
 *		expected to check whether the relkind is something it can handle.
 */
Relation
relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}

/*
 *		try_relation_open - open any relation by relation OID
 *
 *		Same as relation_open, except return NULL instead of failing
 *		if the relation does not exist.
 */
Relation
try_relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock first */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/*
	 * Now that we have the lock, probe to see if the relation really exists
	 * or not.
	 */
	if (!SearchSysCacheExists(RELOID,
							  ObjectIdGetDatum(relationId),
							  0, 0, 0))
	{
		/* Release useless lock */
		if (lockmode != NoLock)
			UnlockRelationOid(relationId, lockmode);

		return NULL;
	}

	/* Should be safe to do a relcache load */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}

/*
 *		relation_open_nowait - open but don't wait for lock
 *
 *		Same as relation_open, except throw an error instead of waiting
 *		when the requested lock is not immediately obtainable.
 */
Relation
relation_open_nowait(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
	{
		if (!ConditionalLockRelationOid(relationId, lockmode))
		{
			/* try to throw error by name; relation could be deleted... */
			char	   *relname = get_rel_name(relationId);

			if (relname)
				ereport(ERROR,
						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
						 errmsg("could not obtain lock on relation \"%s\"",
								relname)));
			else
				ereport(ERROR,
						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
						 errmsg("could not obtain lock on relation with OID %u",
								relationId)));
		}
	}

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}

/*
 *		relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_open, but the relation is specified by a RangeVar.
 */
Relation
relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Oid			relOid;

	/*
	 * Check for shared-cache-inval messages before trying to open the
	 * relation.  This is needed to cover the case where the name identifies a
	 * rel that has been dropped and recreated since the start of our
	 * transaction: if we don't flush the old syscache entry then we'll latch
	 * onto that entry and suffer an error when we do RelationIdGetRelation.
	 * Note that relation_open does not need to do this, since a relation's
	 * OID never changes.
	 *
	 * We skip this if asked for NoLock, on the assumption that the caller has
	 * already ensured some appropriate lock is held.
	 */
	if (lockmode != NoLock)
		AcceptInvalidationMessages();

	/* Look up the appropriate relation using namespace search */
	relOid = RangeVarGetRelid(relation, false);

	/* Let relation_open do the rest */
	return relation_open(relOid, lockmode);
}

/*
 *		try_relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_openrv, but return NULL instead of failing for
 *		relation-not-found.  (Note that some other causes, such as
 *		permissions problems, will still result in an ereport.)
 */
Relation
try_relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Oid			relOid;

	/*
	 * Check for shared-cache-inval messages before trying to open the
	 * relation.  This is needed to cover the case where the name identifies a
	 * rel that has been dropped and recreated since the start of our
	 * transaction: if we don't flush the old syscache entry then we'll latch
	 * onto that entry and suffer an error when we do RelationIdGetRelation.
	 * Note that relation_open does not need to do this, since a relation's
	 * OID never changes.
	 *
	 * We skip this if asked for NoLock, on the assumption that the caller has
	 * already ensured some appropriate lock is held.
	 */
	if (lockmode != NoLock)
		AcceptInvalidationMessages();

	/* Look up the appropriate relation using namespace search */
	relOid = RangeVarGetRelid(relation, true);

	/* Return NULL on not-found */
	if (!OidIsValid(relOid))
		return NULL;

	/* Let relation_open do the rest */
	return relation_open(relOid, lockmode);
}

/*
 *		relation_close - close any relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond relation_close;
 *		in that case, the lock is released automatically at xact end.
 */
void
relation_close(Relation relation, LOCKMODE lockmode)
{
	LockRelId	relid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&relid, lockmode);
}
/*
 *		heap_open - open a heap relation by relation OID
 *
 *		This is essentially relation_open plus check that the relation
 *		is not an index nor a composite type.  (The caller should also
 *		check that it's not a view before assuming it has storage.)
 */
Relation
heap_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_open(relationId, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}

/*
 *		heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but relation is specified by a RangeVar.
 */
Relation
heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_openrv(relation, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}

/*
 *		try_heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but return NULL instead of failing for relation-not-found.
 */
Relation
try_heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Relation	r;

	r = try_relation_openrv(relation, lockmode);

	if (r)
	{
		if (r->rd_rel->relkind == RELKIND_INDEX)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is an index",
							RelationGetRelationName(r))));
		else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is a composite type",
							RelationGetRelationName(r))));
	}

	return r;
}
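/*
 * Illustrative usage sketch (hypothetical helper, for exposition only): the
 * usual open/work/close pattern with heap_open.  The lock mode should match
 * the intended operation; heap_close is just relation_close, so passing the
 * same lock mode releases the lock immediately, while passing NoLock keeps
 * it until transaction end.
 */
#ifdef NOT_USED
static void
example_open_and_close(Oid relid)
{
	Relation	rel;

	/* take a lock appropriate to the planned modifications */
	rel = heap_open(relid, RowExclusiveLock);

	/* ... insert, update, or delete tuples here ... */

	heap_close(rel, RowExclusiveLock);
}
#endif   /* NOT_USED */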
/*
 *		heap_beginscan	- begin relation scan
 *
 * heap_beginscan_strat offers an extended API that lets the caller control
 * whether a nondefault buffer access strategy can be used, and whether
 * syncscan can be chosen (possibly resulting in the scan not starting from
 * block zero).  Both of these default to TRUE with plain heap_beginscan.
 *
 * heap_beginscan_bm is an alternative entry point for setting up a
 * HeapScanDesc for a bitmap heap scan.  Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality
 * to make it worth using the same data structure.
 */
HeapScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
			   int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   true, true, false);
}

HeapScanDesc
heap_beginscan_strat(Relation relation, Snapshot snapshot,
					 int nkeys, ScanKey key,
					 bool allow_strat, bool allow_sync)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   allow_strat, allow_sync, false);
}

HeapScanDesc
heap_beginscan_bm(Relation relation, Snapshot snapshot,
				  int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   false, false, true);
}

static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan)
{
	HeapScanDesc scan;

	/*
	 * increment relation ref count while scanning relation
	 *
	 * This is just to make really sure the relcache entry won't go away while
	 * the scan has a pointer to it.  Caller should be holding the rel open
	 * anyway, so this is redundant in all normal scenarios...
	 */
	RelationIncrementReferenceCount(relation);

	/*
	 * allocate and initialize scan descriptor
	 */
	scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));

	scan->rs_rd = relation;
	scan->rs_snapshot = snapshot;
	scan->rs_nkeys = nkeys;
	scan->rs_bitmapscan = is_bitmapscan;
	scan->rs_strategy = NULL;	/* set in initscan */
	scan->rs_allow_strat = allow_strat;
	scan->rs_allow_sync = allow_sync;

	/*
	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
	 */
	scan->rs_pageatatime = IsMVCCSnapshot(snapshot);

	/* we only need to set this up once */
	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

	/*
	 * we do this here instead of in initscan() because heap_rescan also calls
	 * initscan() and we don't want to allocate memory again
	 */
	if (nkeys > 0)
		scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
	else
		scan->rs_key = NULL;

	initscan(scan, key);

	return scan;
}
/*
 *		heap_rescan		- restart a relation scan
 */
void
heap_rescan(HeapScanDesc scan,
			ScanKey key)
{
	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * reinitialize scan descriptor
	 */
	initscan(scan, key);
}

/*
 *		heap_endscan	- end relation scan
 *
 *		See how to integrate with index scans.
 *		Check handling if reldesc caching.
 */
void
heap_endscan(HeapScanDesc scan)
{
	/* Note: no locking manipulations needed */

	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * decrement relation reference count and free scan descriptor storage
	 */
	RelationDecrementReferenceCount(scan->rs_rd);

	if (scan->rs_key)
		pfree(scan->rs_key);

	if (scan->rs_strategy != NULL)
		FreeAccessStrategy(scan->rs_strategy);

	pfree(scan);
}

/*
 *		heap_getnext	- retrieve next tuple in scan
 *
 *		Fix to work with index relations.
 *		We don't return the buffer anymore, but you can get it from the
 *		returned HeapTuple.
 */

#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
	elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
		 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
#define HEAPDEBUG_2 \
	elog(DEBUG2, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
	elog(DEBUG2, "heap_getnext returning tuple")
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
#define HEAPDEBUG_3
#endif   /* !defined(HEAPDEBUGALL) */

HeapTuple
heap_getnext(HeapScanDesc scan, ScanDirection direction)
{
	/* Note: no locking manipulations needed */

	HEAPDEBUG_1;				/* heap_getnext( info ) */

	if (scan->rs_pageatatime)
		heapgettup_pagemode(scan, direction,
							scan->rs_nkeys, scan->rs_key);
	else
		heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);

	if (scan->rs_ctup.t_data == NULL)
	{
		HEAPDEBUG_2;			/* heap_getnext returning EOS */
		return NULL;
	}

	/*
	 * if we get here it means we have a new current scan tuple, so point to
	 * the proper return buffer and return the tuple.
	 */
	HEAPDEBUG_3;				/* heap_getnext returning tuple */

	pgstat_count_heap_getnext(scan->rs_rd);

	return &(scan->rs_ctup);
}
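/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * driving a full sequential scan with heap_beginscan, heap_getnext and
 * heap_endscan.  The returned HeapTuple points into the scan's current
 * buffer, so it must not be used after the next heap_getnext or after
 * heap_endscan.  SnapshotNow is just one possible snapshot choice here.
 */
#ifdef NOT_USED
static int
example_count_visible_tuples(Relation rel)
{
	HeapScanDesc scan;
	HeapTuple	tuple;
	int			ntuples = 0;

	scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
		ntuples++;
	heap_endscan(scan);

	return ntuples;
}
#endif   /* NOT_USED */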
/*
 *	heap_fetch		- retrieve tuple with given tid
 *
 * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
 * the tuple, fill in the remaining fields of *tuple, and check the tuple
 * against the specified snapshot.
 *
 * If successful (tuple found and passes snapshot time qual), then *userbuf
 * is set to the buffer holding the tuple and TRUE is returned.  The caller
 * must unpin the buffer when done with the tuple.
 *
 * If the tuple is not found (ie, item number references a deleted slot),
 * then tuple->t_data is set to NULL and FALSE is returned.
 *
 * If the tuple is found but fails the time qual check, then FALSE is returned
 * but tuple->t_data is left pointing to the tuple.
 *
 * keep_buf determines what is done with the buffer in the FALSE-result cases.
 * When the caller specifies keep_buf = true, we retain the pin on the buffer
 * and return it in *userbuf (so the caller must eventually unpin it); when
 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
 *
 * stats_relation is the relation to charge the heap_fetch operation against
 * for statistical purposes.  (This could be the heap rel itself, an
 * associated index, or NULL to not count the fetch at all.)
 *
 * heap_fetch does not follow HOT chains: only the exact TID requested will
 * be fetched.
 *
 * It is somewhat inconsistent that we ereport() on invalid block number but
 * return false on invalid item number.  There are a couple of reasons though.
 * One is that the caller can relatively easily check the block number for
 * validity, but cannot check the item number without reading the page
 * himself.  Another is that when we are following a t_ctid link, we can be
 * reasonably confident that the page number is valid (since VACUUM shouldn't
 * truncate off the destination page without having killed the referencing
 * tuple first), but the item number might well not be good.
 */
bool
heap_fetch(Relation relation,
		   Snapshot snapshot,
		   HeapTuple tuple,
		   Buffer *userbuf,
		   bool keep_buf,
		   Relation stats_relation)
{
	ItemPointer tid = &(tuple->t_self);
	ItemId		lp;
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	bool		valid;

	/*
	 * Fetch and pin the appropriate page of the relation.
	 */
	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));

	/*
	 * Need share lock on buffer to examine tuple commit status.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	page = BufferGetPage(buffer);

	/*
	 * We'd better check for out-of-range offnum in case of VACUUM since the
	 * TID was obtained.
	 */
	offnum = ItemPointerGetOffsetNumber(tid);
	if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (keep_buf)
			*userbuf = buffer;
		else
		{
			ReleaseBuffer(buffer);
			*userbuf = InvalidBuffer;
		}
		tuple->t_data = NULL;
		return false;
	}

	/*
	 * get the item line pointer corresponding to the requested tid
	 */
	lp = PageGetItemId(page, offnum);

	/*
	 * Must check for deleted tuple.
	 */
	if (!ItemIdIsNormal(lp))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (keep_buf)
			*userbuf = buffer;
		else
		{
			ReleaseBuffer(buffer);
			*userbuf = InvalidBuffer;
		}
		tuple->t_data = NULL;
		return false;
	}

	/*
	 * fill in *tuple fields
	 */
	tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
	tuple->t_len = ItemIdGetLength(lp);
	tuple->t_tableOid = RelationGetRelid(relation);

	/*
	 * check time qualification of tuple, then release lock
	 */
	valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	if (valid)
	{
		/*
		 * All checks passed, so return the tuple as valid. Caller is now
		 * responsible for releasing the buffer.
		 */
		*userbuf = buffer;

		/* Count the successful fetch against appropriate rel, if any */
		if (stats_relation != NULL)
			pgstat_count_heap_fetch(stats_relation);

		return true;
	}

	/* Tuple failed time qual, but maybe caller wants to see it anyway. */
	if (keep_buf)
		*userbuf = buffer;
	else
	{
		ReleaseBuffer(buffer);
		*userbuf = InvalidBuffer;
	}

	return false;
}
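/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * fetching a single tuple by TID with heap_fetch.  On success the caller
 * owns a pin on the returned buffer and must release it; with keep_buf set
 * to false, the failure cases leave *userbuf set to InvalidBuffer.
 */
#ifdef NOT_USED
static bool
example_fetch_by_tid(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;

	tuple.t_self = *tid;
	if (heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
	{
		/* ... examine tuple.t_data while the buffer pin is held ... */
		ReleaseBuffer(buffer);
		return true;
	}
	return false;
}
#endif   /* NOT_USED */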
/*
 *	heap_hot_search_buffer	- search HOT chain for tuple satisfying snapshot
 *
 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
 * of a HOT chain), and buffer is the buffer holding this tuple.  We search
 * for the first chain member satisfying the given snapshot.  If one is
 * found, we update *tid to reference that tuple's offset number, and
 * return TRUE.  If no match, return FALSE without modifying *tid.
 *
 * If all_dead is not NULL, we check non-visible tuples to see if they are
 * globally dead; *all_dead is set TRUE if all members of the HOT chain
 * are vacuumable, FALSE if not.
 *
 * Unlike heap_fetch, the caller must already have pin and (at least) share
 * lock on the buffer; it is still pinned/locked at exit.  Also unlike
 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
 */
bool
heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
					   bool *all_dead)
{
	Page		dp = (Page) BufferGetPage(buffer);
	TransactionId prev_xmax = InvalidTransactionId;
	OffsetNumber offnum;
	bool		at_chain_start;

	if (all_dead)
		*all_dead = true;

	Assert(TransactionIdIsValid(RecentGlobalXmin));

	Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
	offnum = ItemPointerGetOffsetNumber(tid);
	at_chain_start = true;

	/* Scan through possible multiple members of HOT-chain */
	for (;;)
	{
		ItemId		lp;
		HeapTupleData heapTuple;

		/* check for bogus TID */
		if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
			break;

		lp = PageGetItemId(dp, offnum);

		/* check for unused, dead, or redirected items */
		if (!ItemIdIsNormal(lp))
		{
			/* We should only see a redirect at start of chain */
			if (ItemIdIsRedirected(lp) && at_chain_start)
			{
				/* Follow the redirect */
				offnum = ItemIdGetRedirect(lp);
				at_chain_start = false;
				continue;
			}
			/* else must be end of chain */
			break;
		}

		heapTuple.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
		heapTuple.t_len = ItemIdGetLength(lp);

		/*
		 * Shouldn't see a HEAP_ONLY tuple at chain start.
		 */
		if (at_chain_start && HeapTupleIsHeapOnly(&heapTuple))
			break;

		/*
		 * The xmin should match the previous xmax value, else chain is
		 * broken.
		 */
		if (TransactionIdIsValid(prev_xmax) &&
			!TransactionIdEquals(prev_xmax,
								 HeapTupleHeaderGetXmin(heapTuple.t_data)))
			break;

		/* If it's visible per the snapshot, we must return it */
		if (HeapTupleSatisfiesVisibility(&heapTuple, snapshot, buffer))
		{
			ItemPointerSetOffsetNumber(tid, offnum);
			if (all_dead)
				*all_dead = false;
			return true;
		}

		/*
		 * If we can't see it, maybe no one else can either.  At caller
		 * request, check whether all chain members are dead to all
		 * transactions.
		 */
		if (all_dead && *all_dead &&
			HeapTupleSatisfiesVacuum(heapTuple.t_data, RecentGlobalXmin,
									 buffer) != HEAPTUPLE_DEAD)
			*all_dead = false;

		/*
		 * Check to see if HOT chain continues past this tuple; if so fetch
		 * the next offnum and loop around.
		 */
		if (HeapTupleIsHotUpdated(&heapTuple))
		{
			Assert(ItemPointerGetBlockNumber(&heapTuple.t_data->t_ctid) ==
				   ItemPointerGetBlockNumber(tid));
			offnum = ItemPointerGetOffsetNumber(&heapTuple.t_data->t_ctid);
			at_chain_start = false;
			prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
		}
		else
			break;				/* end of chain */
	}

	return false;
}

/*
 *	heap_hot_search		- search HOT chain for tuple satisfying snapshot
 *
 * This has the same API as heap_hot_search_buffer, except that the caller
 * does not provide the buffer containing the page, rather we access it
 * locally.
 */
bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
				bool *all_dead)
{
	bool		result;
	Buffer		buffer;

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	result = heap_hot_search_buffer(tid, buffer, snapshot, all_dead);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buffer);
	return result;
}
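/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * probing a HOT chain with heap_hot_search, much as an index-lookup caller
 * would.  On a true result *tid has been updated to the visible chain
 * member; all_dead can be used to decide whether the index entry could be
 * marked killed.
 */
#ifdef NOT_USED
static bool
example_probe_hot_chain(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	bool		all_dead;

	return heap_hot_search(tid, rel, snapshot, &all_dead);
}
#endif   /* NOT_USED */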
/*
 *	heap_get_latest_tid -  get the latest tid of a specified tuple
 *
 * Actually, this gets the latest version that is visible according to
 * the passed snapshot.  You can pass SnapshotDirty to get the very latest,
 * possibly uncommitted version.
 *
 * *tid is both an input and an output parameter: it is updated to
 * show the latest version of the row.  Note that it will not be changed
 * if no version of the row passes the snapshot test.
 */
void
heap_get_latest_tid(Relation relation,
					Snapshot snapshot,
					ItemPointer tid)
{
	BlockNumber blk;
	ItemPointerData ctid;
	TransactionId priorXmax;

	/* this is to avoid Assert failures on bad input */
	if (!ItemPointerIsValid(tid))
		return;

	/*
	 * Since this can be called with user-supplied TID, don't trust the input
	 * too much.  (RelationGetNumberOfBlocks is an expensive check, so we
	 * don't check t_ctid links again this way.  Note that it would not do to
	 * call it just once and save the result, either.)
	 */
	blk = ItemPointerGetBlockNumber(tid);
	if (blk >= RelationGetNumberOfBlocks(relation))
		elog(ERROR, "block number %u is out of range for relation \"%s\"",
			 blk, RelationGetRelationName(relation));

	/*
	 * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
	 * need to examine, and *tid is the TID we will return if ctid turns out
	 * to be bogus.
	 *
	 * Note that we will loop until we reach the end of the t_ctid chain.
	 * Depending on the snapshot passed, there might be at most one visible
	 * version of the row, but we don't try to optimize for that.
	 */
	ctid = *tid;
	priorXmax = InvalidTransactionId;	/* cannot check first XMIN */
	for (;;)
	{
		Buffer		buffer;
		Page		page;
		OffsetNumber offnum;
		ItemId		lp;
		HeapTupleData tp;
		bool		valid;

		/*
		 * Read, pin, and lock the page.
		 */
		buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buffer);

		/*
		 * Check for bogus item number.  This is not treated as an error
		 * condition because it can happen while following a t_ctid link. We
		 * just assume that the prior tid is OK and return it unchanged.
		 */
		offnum = ItemPointerGetOffsetNumber(&ctid);
		if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}
		lp = PageGetItemId(page, offnum);
		if (!ItemIdIsNormal(lp))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		/* OK to access the tuple */
		tp.t_self = ctid;
		tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
		tp.t_len = ItemIdGetLength(lp);

		/*
		 * After following a t_ctid link, we might arrive at an unrelated
		 * tuple.  Check for XMIN match.
		 */
		if (TransactionIdIsValid(priorXmax) &&
			!TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		/*
		 * Check time qualification of tuple; if visible, set it as the new
		 * result candidate.
		 */
		valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
		if (valid)
			*tid = ctid;

		/*
		 * If there's a valid t_ctid link, follow it, else we're done.
		 */
		if ((tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) ||
			ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		ctid = tp.t_data->t_ctid;
		priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
		UnlockReleaseBuffer(buffer);
	}							/* end of loop */
}
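/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * resolving a possibly-stale TID to the newest row version visible to the
 * given snapshot.  The TID is updated in place only if some version passes
 * the snapshot test.
 */
#ifdef NOT_USED
static ItemPointerData
example_latest_version(Relation rel, Snapshot snapshot, ItemPointerData tid)
{
	heap_get_latest_tid(rel, snapshot, &tid);
	return tid;
}
#endif   /* NOT_USED */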
/*
 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
 *
 * This is called after we have waited for the XMAX transaction to terminate.
 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
 * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
 * hint bit if possible --- but beware that that may not yet be possible,
 * if the transaction committed asynchronously.  Hence callers should look
 * only at XMAX_INVALID.
 */
static void
UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
{
	Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));

	if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
	{
		if (TransactionIdDidCommit(xid))
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
								 xid);
		else
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
								 InvalidTransactionId);
	}
}
/*
 *	heap_insert		- insert tuple into a heap
 *
 * The new tuple is stamped with current transaction ID and the specified
 * command ID.
 *
 * If use_wal is false, the new tuple is not logged in WAL, even for a
 * non-temp relation.  Safe usage of this behavior requires that we arrange
 * that all new tuples go into new pages not containing any tuples from other
 * transactions, and that the relation gets fsync'd before commit.
 * (See also heap_sync() comments)
 *
 * use_fsm is passed directly to RelationGetBufferForTuple, which see for
 * more info.
 *
 * Note that use_wal and use_fsm will be applied when inserting into the
 * heap's TOAST table, too, if the tuple requires any out-of-line data.
 *
 * The return value is the OID assigned to the tuple (either here or by the
 * caller), or InvalidOid if no OID.  The header fields of *tup are updated
 * to match the stored tuple; in particular tup->t_self receives the actual
 * TID where the tuple was stored.  But note that any toasting of fields
 * within the tuple data is NOT reflected into *tup.
 */
Oid
heap_insert(Relation relation, HeapTuple tup, CommandId cid,
			bool use_wal, bool use_fsm)
{
	TransactionId xid = GetCurrentTransactionId();
	HeapTuple	heaptup;
	Buffer		buffer;

	if (relation->rd_rel->relhasoids)
	{
#ifdef NOT_USED
		/* this is redundant with an Assert in HeapTupleSetOid */
		Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif

		/*
		 * If the object id of this tuple has already been assigned, trust the
		 * caller.  There are a couple of ways this can happen.  At initial db
		 * creation, the backend program sets oids for tuples. When we define
		 * an index, we set the oid.  Finally, in the future, we may allow
		 * users to set their own object ids in order to support a persistent
		 * object store (objects need to contain pointers to one another).
		 */
		if (!OidIsValid(HeapTupleGetOid(tup)))
			HeapTupleSetOid(tup, GetNewOid(relation));
	}
	else
	{
		/* check there is not space for an OID */
		Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
	}

	tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
	tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
	tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
	HeapTupleHeaderSetXmin(tup->t_data, xid);
	HeapTupleHeaderSetCmin(tup->t_data, cid);
	HeapTupleHeaderSetXmax(tup->t_data, 0);		/* for cleanliness */
	tup->t_tableOid = RelationGetRelid(relation);

	/*
	 * If the new tuple is too big for storage or contains already toasted
	 * out-of-line attributes from some other relation, invoke the toaster.
	 *
	 * Note: below this point, heaptup is the data we actually intend to store
	 * into the relation; tup is the caller's original untoasted data.
	 */
	if (relation->rd_rel->relkind != RELKIND_RELATION)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(tup));
		heaptup = tup;
	}
	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
		heaptup = toast_insert_or_update(relation, tup, NULL,
										 use_wal, use_fsm);
	else
		heaptup = tup;

	/* Find buffer to insert this tuple into */
	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
									   InvalidBuffer, use_fsm);

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	RelationPutHeapTuple(relation, buffer, heaptup);

	/*
	 * XXX Should we set PageSetPrunable on this page ?
	 *
	 * The inserting transaction may eventually abort thus making this tuple
	 * DEAD and hence available for pruning. Though we don't want to optimize
	 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
	 * aborted tuple will never be pruned until next vacuum is triggered.
	 *
	 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
	 */

	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (use_wal && !relation->rd_istemp)
	{
		xl_heap_insert xlrec;
		xl_heap_header xlhdr;
		XLogRecPtr	recptr;
		XLogRecData rdata[3];
		Page		page = BufferGetPage(buffer);
		uint8		info = XLOG_HEAP_INSERT;

		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = heaptup->t_self;
		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapInsert;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
		xlhdr.t_infomask = heaptup->t_data->t_infomask;
		xlhdr.t_hoff = heaptup->t_data->t_hoff;

		/*
		 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
		 * to write the whole page to the xlog, we don't need to store
		 * xl_heap_header in the xlog.
		 */
		rdata[1].data = (char *) &xlhdr;
		rdata[1].len = SizeOfHeapHeader;
		rdata[1].buffer = buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = &(rdata[2]);

		/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
		rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
		rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
		rdata[2].buffer = buffer;
		rdata[2].buffer_std = true;
		rdata[2].next = NULL;

		/*
		 * If this is the single and first tuple on page, we can reinit the
		 * page instead of restoring the whole thing.  Set flag, and hide
		 * buffer references from XLogInsert.
		 */
		if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
			PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
		{
			info |= XLOG_HEAP_INIT_PAGE;
			rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
		}

		recptr = XLogInsert(RM_HEAP_ID, info, rdata);

		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	UnlockReleaseBuffer(buffer);

	/*
	 * If tuple is cachable, mark it for invalidation from the caches in case
	 * we abort.  Note it is OK to do this after releasing the buffer, because
	 * the heaptup data structure is all in local memory, not in the shared
	 * buffer.
	 */
	CacheInvalidateHeapTuple(relation, heaptup);

	pgstat_count_heap_insert(relation);

	/*
	 * If heaptup is a private copy, release it.  Don't forget to copy t_self
	 * back to the caller's image, too.
	 */
	if (heaptup != tup)
	{
		tup->t_self = heaptup->t_self;
		heap_freetuple(heaptup);
	}

	return HeapTupleGetOid(tup);
}

/*
 *	simple_heap_insert - insert a tuple
 *
 * Currently, this routine differs from heap_insert only in supplying
 * a default command ID and not allowing access to the speedup options.
 *
 * This should be used rather than using heap_insert directly in most places
 * where we are modifying system catalogs.
 */
Oid
simple_heap_insert(Relation relation, HeapTuple tup)
{
	return heap_insert(relation, tup, GetCurrentCommandId(true), true, true);
}
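/*
 * Illustrative usage sketch (hypothetical helper, for exposition only):
 * forming a tuple from datums and inserting it with simple_heap_insert, the
 * pattern used when modifying system catalogs.  Catalog callers would also
 * update indexes (CatalogUpdateIndexes) and bump the command counter
 * afterwards.
 */
#ifdef NOT_USED
static Oid
example_insert_row(Relation rel, Datum *values, bool *isnull)
{
	HeapTuple	tup;
	Oid			oid;

	tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
	oid = simple_heap_insert(rel, tup);
	heap_freetuple(tup);

	return oid;
}
#endif   /* NOT_USED */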
2007 * heap_delete - delete a tuple
2009 * NB: do not call this directly unless you are prepared to deal with
2010 * concurrent-update conditions. Use simple_heap_delete instead.
2012 * relation - table to be modified (caller must hold suitable lock)
2013 * tid - TID of tuple to be deleted
2014 * ctid - output parameter, used only for failure case (see below)
2015 * update_xmax - output parameter, used only for failure case (see below)
2016 * cid - delete command ID (used for visibility test, and stored into
2017 * cmax if successful)
2018 * crosscheck - if not InvalidSnapshot, also check tuple against this
2019 * wait - true if should wait for any conflicting update to commit/abort
2021 * Normal, successful return value is HeapTupleMayBeUpdated, which
2022 * actually means we did delete it. Failure return codes are
2023 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2024 * (the last only possible if wait == false).
2026 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2027 * If t_ctid is the same as tid, the tuple was deleted; if different, the
2028 * tuple was updated, and t_ctid is the location of the replacement tuple.
2029 * (t_xmax is needed to verify that the replacement tuple matches.)
2032 heap_delete(Relation relation
, ItemPointer tid
,
2033 ItemPointer ctid
, TransactionId
*update_xmax
,
2034 CommandId cid
, Snapshot crosscheck
, bool wait
)
2037 TransactionId xid
= GetCurrentTransactionId();
2042 bool have_tuple_lock
= false;
2045 Assert(ItemPointerIsValid(tid
));
2047 buffer
= ReadBuffer(relation
, ItemPointerGetBlockNumber(tid
));
2048 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2050 page
= BufferGetPage(buffer
);
2051 lp
= PageGetItemId(page
, ItemPointerGetOffsetNumber(tid
));
2052 Assert(ItemIdIsNormal(lp
));
2054 tp
.t_data
= (HeapTupleHeader
) PageGetItem(page
, lp
);
2055 tp
.t_len
= ItemIdGetLength(lp
);
2059 result
= HeapTupleSatisfiesUpdate(tp
.t_data
, cid
, buffer
);
2061 if (result
== HeapTupleInvisible
)
2063 UnlockReleaseBuffer(buffer
);
2064 elog(ERROR
, "attempted to delete invisible tuple");
2066 else if (result
== HeapTupleBeingUpdated
&& wait
)
2068 TransactionId xwait
;
2071 /* must copy state data before unlocking buffer */
2072 xwait
= HeapTupleHeaderGetXmax(tp
.t_data
);
2073 infomask
= tp
.t_data
->t_infomask
;
2075 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2078 * Acquire tuple lock to establish our priority for the tuple (see
2079 * heap_lock_tuple). LockTuple will release us when we are
2080 * next-in-line for the tuple.
2082 * If we are forced to "start over" below, we keep the tuple lock;
2083 * this arranges that we stay at the head of the line while rechecking
2086 if (!have_tuple_lock
)
2088 LockTuple(relation
                      , &(tp.t_self), ExclusiveLock);
            have_tuple_lock = true;
        }

        /*
         * Sleep until concurrent transaction ends.  Note that we don't care
         * if the locker has an exclusive or shared lock, because we need
         * exclusive.
         */
        if (infomask & HEAP_XMAX_IS_MULTI)
        {
            /* wait for multixact */
            MultiXactIdWait((MultiXactId) xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * If xwait had just locked the tuple then some other xact could
             * update this tuple before we get to this point.  Check for xmax
             * change, and start over if so.
             */
            if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
                                     xwait))
                goto l1;

            /*
             * You might think the multixact is necessarily done here, but not
             * so: it could have surviving members, namely our own xact or
             * other subxacts of this backend.  It is legal for us to delete
             * the tuple in either case, however (the latter case is
             * essentially a situation of upgrading our former shared lock to
             * exclusive).  We don't bother changing the on-disk hint bits
             * since we are about to overwrite the xmax altogether.
             */
        }
        else
        {
            /* wait for regular transaction to end */
            XactLockTableWait(xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * xwait is done, but if xwait had just locked the tuple then some
             * other xact could update this tuple before we get to this point.
             * Check for xmax change, and start over if so.
             */
            if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
                                     xwait))
                goto l1;

            /* Otherwise check if it committed or aborted */
            UpdateXmaxHintBits(tp.t_data, buffer, xwait);
        }

        /*
         * We may overwrite if previous xmax aborted, or if it committed but
         * only locked the tuple without updating it.
         */
        if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
                                     HEAP_IS_LOCKED))
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }

    if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
    {
        /* Perform additional check for serializable RI updates */
        if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
            result = HeapTupleUpdated;
    }

    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated ||
               result == HeapTupleUpdated ||
               result == HeapTupleBeingUpdated);
        Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
        *ctid = tp.t_data->t_ctid;
        *update_xmax = HeapTupleHeaderGetXmax(tp.t_data);
        UnlockReleaseBuffer(buffer);
        if (have_tuple_lock)
            UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
        return result;
    }

    /* replace cid with a combo cid if necessary */
    HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);

    START_CRIT_SECTION();

    /*
     * If this transaction commits, the tuple will become DEAD sooner or
     * later.  Set flag that this page is a candidate for pruning once our xid
     * falls below the OldestXmin horizon.  If the transaction finally aborts,
     * the subsequent page pruning will be a no-op and the hint will be
     * cleared.
     */
    PageSetPrunable(page, xid);

    /* store transaction information of xact deleting the tuple */
    tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                               HEAP_XMAX_INVALID |
                               HEAP_XMAX_IS_MULTI |
                               HEAP_IS_LOCKED |
                               HEAP_MOVED);
    HeapTupleHeaderClearHotUpdated(tp.t_data);
    HeapTupleHeaderSetXmax(tp.t_data, xid);
    HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    /* Make sure there is no forward chain link in t_ctid */
    tp.t_data->t_ctid = tp.t_self;

    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (!relation->rd_istemp)
    {
        xl_heap_delete xlrec;
        XLogRecPtr  recptr;
        XLogRecData rdata[2];

        xlrec.target.node = relation->rd_node;
        xlrec.target.tid = tp.t_self;
        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfHeapDelete;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        rdata[1].data = NULL;
        rdata[1].len = 0;
        rdata[1].buffer = buffer;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    /*
     * If the tuple has toasted out-of-line attributes, we need to delete
     * those items too.  We have to do this before releasing the buffer
     * because we need to look at the contents of the tuple, but it's OK to
     * release the content lock on the buffer first.
     */
    if (relation->rd_rel->relkind != RELKIND_RELATION)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(&tp));
    }
    else if (HeapTupleHasExternal(&tp))
        toast_delete(relation, &tp);

    /*
     * Mark tuple for invalidation from system caches at next command
     * boundary.  We have to do this before releasing the buffer because we
     * need to look at the contents of the tuple.
     */
    CacheInvalidateHeapTuple(relation, &tp);

    /* Now we can release the buffer */
    ReleaseBuffer(buffer);

    /*
     * Release the lmgr tuple lock, if we had it.
     */
    if (have_tuple_lock)
        UnlockTuple(relation, &(tp.t_self), ExclusiveLock);

    pgstat_count_heap_delete(relation);

    return HeapTupleMayBeUpdated;
}
/*
 *  simple_heap_delete - delete a tuple
 *
 * This routine may be used to delete a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;

    result = heap_delete(relation, tid,
                         &update_ctid, &update_xmax,
                         GetCurrentCommandId(true), InvalidSnapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* Tuple was already updated in current command? */
            elog(ERROR, "tuple already updated by self");
            break;

        case HeapTupleMayBeUpdated:
            /* done successfully */
            break;

        case HeapTupleUpdated:
            elog(ERROR, "tuple concurrently updated");
            break;

        default:
            elog(ERROR, "unrecognized heap_delete status: %u", result);
            break;
    }
}
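/*
 * Illustrative sketch (not part of the original file): how a caller that
 * already holds an adequate lock on the relation might use
 * simple_heap_delete on a tuple it just fetched.  The relation OID and the
 * tuple-lookup helper are assumptions made only for this example.
 *
 *      Relation    rel = heap_open(SomeCatalogRelationId, RowExclusiveLock);
 *      HeapTuple   tup = fetch_target_tuple(rel);     (hypothetical lookup)
 *
 *      simple_heap_delete(rel, &tup->t_self);
 *      heap_close(rel, RowExclusiveLock);
 *
 * Because simple_heap_delete elogs on any concurrent-update outcome, this
 * pattern is only appropriate when such conflicts cannot occur.
 */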
/*
 *  heap_update - replace a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_update instead.
 *
 *  relation - table to be modified (caller must hold suitable lock)
 *  otid - TID of old tuple to be replaced
 *  newtup - newly constructed tuple data to store
 *  ctid - output parameter, used only for failure case (see below)
 *  update_xmax - output parameter, used only for failure case (see below)
 *  cid - update command ID (used for visibility test, and stored into
 *      cmax/cmin if successful)
 *  crosscheck - if not InvalidSnapshot, also check old tuple against this
 *  wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we *did* update it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * On success, the header fields of *newtup are updated to match the new
 * stored tuple; in particular, newtup->t_self is set to the TID where the
 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
 * update was done.  However, any TOAST changes in the new tuple's
 * data are not reflected into *newtup.
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as otid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
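/*
 * Illustrative sketch (not part of the original file): a caller prepared to
 * deal with the failure return codes documented above might react roughly as
 * follows.  Snapshot and tuple plumbing are elided; only the use of the
 * ctid/update_xmax output parameters is shown, and the reaction to each
 * outcome is an assumption, not the behavior of any particular backend caller.
 *
 *      ItemPointerData update_ctid;
 *      TransactionId update_xmax;
 *      HTSU_Result res;
 *
 *      res = heap_update(rel, otid, newtup,
 *                        &update_ctid, &update_xmax,
 *                        GetCurrentCommandId(true), InvalidSnapshot,
 *                        true);
 *      if (res == HeapTupleUpdated)
 *      {
 *          if (ItemPointerEquals(otid, &update_ctid))
 *              elog(ERROR, "tuple was concurrently deleted");
 *          else
 *              retry_at(&update_ctid, update_xmax);    (hypothetical helper)
 *      }
 *      else if (res != HeapTupleMayBeUpdated)
 *          elog(ERROR, "unexpected heap_update status: %u", res);
 *
 * Real callers verify that the replacement tuple's xmax matches update_xmax
 * before chasing the ctid link, as the comment above explains.
 */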
HTSU_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
            ItemPointer ctid, TransactionId *update_xmax,
            CommandId cid, Snapshot crosscheck, bool wait)
{
    HTSU_Result result;
    TransactionId xid = GetCurrentTransactionId();
    Bitmapset  *hot_attrs;
    ItemId      lp;
    HeapTupleData oldtup;
    HeapTuple   heaptup;
    Page        page;
    Buffer      buffer,
                newbuf;
    bool        need_toast,
                already_marked;
    Size        newtupsize,
                pagefree;
    bool        have_tuple_lock = false;
    bool        iscombo;
    bool        use_hot_update = false;

    Assert(ItemPointerIsValid(otid));

    /*
     * Fetch the list of attributes to be checked for HOT update.  This is
     * wasted effort if we fail to update or have to put the new tuple on a
     * different page.  But we must compute the list before obtaining buffer
     * lock --- in the worst case, if we are doing an update on one of the
     * relevant system catalogs, we could deadlock if we try to fetch the list
     * later.  In any case, the relcache caches the data so this is usually
     * pretty cheap.
     *
     * Note that we get a copy here, so we need not worry about relcache flush
     * happening midway through.
     */
    hot_attrs = RelationGetIndexAttrBitmap(relation);

    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    page = BufferGetPage(buffer);
    lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
    Assert(ItemIdIsNormal(lp));

    oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    oldtup.t_len = ItemIdGetLength(lp);
    oldtup.t_self = *otid;

    /*
     * Note: beyond this point, use oldtup not otid to refer to old tuple.
     * otid may very well point at newtup->t_self, which we will overwrite
     * with the new tuple's location, so there's great risk of confusion if we
     * use otid anymore.
     */

l2:
    result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);

    if (result == HeapTupleInvisible)
    {
        UnlockReleaseBuffer(buffer);
        elog(ERROR, "attempted to update invisible tuple");
    }
    else if (result == HeapTupleBeingUpdated && wait)
    {
        TransactionId xwait;
        uint16      infomask;

        /* must copy state data before unlocking buffer */
        xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
        infomask = oldtup.t_data->t_infomask;

        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /*
         * Acquire tuple lock to establish our priority for the tuple (see
         * heap_lock_tuple).  LockTuple will release us when we are
         * next-in-line for the tuple.
         *
         * If we are forced to "start over" below, we keep the tuple lock;
         * this arranges that we stay at the head of the line while rechecking
         * tuple state.
         */
        if (!have_tuple_lock)
        {
            LockTuple(relation, &(oldtup.t_self), ExclusiveLock);
            have_tuple_lock = true;
        }

        /*
         * Sleep until concurrent transaction ends.  Note that we don't care
         * if the locker has an exclusive or shared lock, because we need
         * exclusive.
         */
        if (infomask & HEAP_XMAX_IS_MULTI)
        {
            /* wait for multixact */
            MultiXactIdWait((MultiXactId) xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * If xwait had just locked the tuple then some other xact could
             * update this tuple before we get to this point.  Check for xmax
             * change, and start over if so.
             */
            if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
                                     xwait))
                goto l2;

            /*
             * You might think the multixact is necessarily done here, but not
             * so: it could have surviving members, namely our own xact or
             * other subxacts of this backend.  It is legal for us to update
             * the tuple in either case, however (the latter case is
             * essentially a situation of upgrading our former shared lock to
             * exclusive).  We don't bother changing the on-disk hint bits
             * since we are about to overwrite the xmax altogether.
             */
        }
        else
        {
            /* wait for regular transaction to end */
            XactLockTableWait(xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * xwait is done, but if xwait had just locked the tuple then some
             * other xact could update this tuple before we get to this point.
             * Check for xmax change, and start over if so.
             */
            if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
                                     xwait))
                goto l2;

            /* Otherwise check if it committed or aborted */
            UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
        }

        /*
         * We may overwrite if previous xmax aborted, or if it committed but
         * only locked the tuple without updating it.
         */
        if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
                                         HEAP_IS_LOCKED))
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }

    if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
    {
        /* Perform additional check for serializable RI updates */
        if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
            result = HeapTupleUpdated;
    }

    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated ||
               result == HeapTupleUpdated ||
               result == HeapTupleBeingUpdated);
        Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
        *ctid = oldtup.t_data->t_ctid;
        *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);
        UnlockReleaseBuffer(buffer);
        if (have_tuple_lock)
            UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
        bms_free(hot_attrs);
        return result;
    }

    /* Fill in OID and transaction status data for newtup */
    if (relation->rd_rel->relhasoids)
    {
        /* this is redundant with an Assert in HeapTupleSetOid */
        Assert(newtup->t_data->t_infomask & HEAP_HASOID);
        HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
    }
    else
    {
        /* check there is not space for an OID */
        Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
    }

    newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
    HeapTupleHeaderSetXmin(newtup->t_data, xid);
    HeapTupleHeaderSetCmin(newtup->t_data, cid);
    HeapTupleHeaderSetXmax(newtup->t_data, 0);  /* for cleanliness */

    /*
     * Replace cid with a combo cid if necessary.  Note that we already put
     * the plain cid into the new tuple.
     */
    HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);

    /*
     * If the toaster needs to be activated, OR if the new tuple will not fit
     * on the same page as the old, then we need to release the content lock
     * (but not the pin!) on the old tuple's buffer while we are off doing
     * TOAST and/or table-file-extension work.  We must mark the old tuple to
     * show that it's already being updated, else other processes may try to
     * update it themselves.
     *
     * We need to invoke the toaster if there are already any out-of-line
     * toasted values present, or if the new tuple is over-threshold.
     */
    if (relation->rd_rel->relkind != RELKIND_RELATION)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(&oldtup));
        Assert(!HeapTupleHasExternal(newtup));
        need_toast = false;
    }
    else
        need_toast = (HeapTupleHasExternal(&oldtup) ||
                      HeapTupleHasExternal(newtup) ||
                      newtup->t_len > TOAST_TUPLE_THRESHOLD);

    pagefree = PageGetHeapFreeSpace(page);

    newtupsize = MAXALIGN(newtup->t_len);

    if (need_toast || newtupsize > pagefree)
    {
        /* Clear obsolete visibility flags ... */
        oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                                       HEAP_XMAX_INVALID |
                                       HEAP_XMAX_IS_MULTI |
                                       HEAP_IS_LOCKED |
                                       HEAP_MOVED);
        HeapTupleClearHotUpdated(&oldtup);
        /* ... and store info about transaction updating this tuple */
        HeapTupleHeaderSetXmax(oldtup.t_data, xid);
        HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
        /* temporarily make it look not-updated */
        oldtup.t_data->t_ctid = oldtup.t_self;
        already_marked = true;
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /*
         * Let the toaster do its thing, if needed.
         *
         * Note: below this point, heaptup is the data we actually intend to
         * store into the relation; newtup is the caller's original untoasted
         * data.
         */
        if (need_toast)
        {
            /* Note we always use WAL and FSM during updates */
            heaptup = toast_insert_or_update(relation, newtup, &oldtup,
                                             true, true);
            newtupsize = MAXALIGN(heaptup->t_len);
        }
        else
            heaptup = newtup;

        /*
         * Now, do we need a new page for the tuple, or not?  This is a bit
         * tricky since someone else could have added tuples to the page while
         * we weren't looking.  We have to recheck the available space after
         * reacquiring the buffer lock.  But don't bother to do that if the
         * former amount of free space is still not enough; it's unlikely
         * there's more free now than before.
         *
         * What's more, if we need to get a new page, we will need to acquire
         * buffer locks on both old and new pages.  To avoid deadlock against
         * some other backend trying to get the same two locks in the other
         * order, we must be consistent about the order we get the locks in.
         * We use the rule "lock the lower-numbered page of the relation
         * first".  To implement this, we must do RelationGetBufferForTuple
         * while not holding the lock on the old page, and we must rely on it
         * to get the locks on both pages in the correct order.
         */
        if (newtupsize > pagefree)
        {
            /* Assume there's no chance to put heaptup on same page. */
            newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
                                               buffer, true);
        }
        else
        {
            /* Re-acquire the lock on the old tuple's page. */
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
            /* Re-check using the up-to-date free space */
            pagefree = PageGetHeapFreeSpace(page);
            if (newtupsize > pagefree)
            {
                /*
                 * Rats, it doesn't fit anymore.  We must now unlock and
                 * relock to avoid deadlock.  Fortunately, this path should
                 * seldom be taken.
                 */
                LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
                newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
                                                   buffer, true);
            }
            else
            {
                /* OK, it fits here, so we're done. */
                newbuf = buffer;
            }
        }
    }
    else
    {
        /* No TOAST work needed, and it'll fit on same page */
        already_marked = false;
        newbuf = buffer;
        heaptup = newtup;
    }

    /*
     * At this point newbuf and buffer are both pinned and locked, and newbuf
     * has enough space for the new tuple.  If they are the same buffer, only
     * one pin is held.
     */

    if (newbuf == buffer)
    {
        /*
         * Since the new tuple is going into the same page, we might be able
         * to do a HOT update.  Check if any of the index columns have been
         * changed.  If not, then HOT update is possible.
         */
        if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
            use_hot_update = true;
    }
    else
    {
        /* Set a hint that the old page could use prune/defrag */
        PageSetFull(page);
    }

    /* NO EREPORT(ERROR) from here till changes are logged */
    START_CRIT_SECTION();

    /*
     * If this transaction commits, the old tuple will become DEAD sooner or
     * later.  Set flag that this page is a candidate for pruning once our xid
     * falls below the OldestXmin horizon.  If the transaction finally aborts,
     * the subsequent page pruning will be a no-op and the hint will be
     * cleared.
     *
     * XXX Should we set hint on newbuf as well?  If the transaction aborts,
     * there would be a prunable tuple in the newbuf; but for now we choose
     * not to optimize for aborts.  Note that heap_xlog_update must be kept in
     * sync if this decision changes.
     */
    PageSetPrunable(page, xid);

    if (use_hot_update)
    {
        /* Mark the old tuple as HOT-updated */
        HeapTupleSetHotUpdated(&oldtup);
        /* And mark the new tuple as heap-only */
        HeapTupleSetHeapOnly(heaptup);
        /* Mark the caller's copy too, in case different from heaptup */
        HeapTupleSetHeapOnly(newtup);
    }
    else
    {
        /* Make sure tuples are correctly marked as not-HOT */
        HeapTupleClearHotUpdated(&oldtup);
        HeapTupleClearHeapOnly(heaptup);
        HeapTupleClearHeapOnly(newtup);
    }

    RelationPutHeapTuple(relation, newbuf, heaptup);    /* insert new tuple */

    if (!already_marked)
    {
        /* Clear obsolete visibility flags ... */
        oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                                       HEAP_XMAX_INVALID |
                                       HEAP_XMAX_IS_MULTI |
                                       HEAP_IS_LOCKED |
                                       HEAP_MOVED);
        /* ... and store info about transaction updating this tuple */
        HeapTupleHeaderSetXmax(oldtup.t_data, xid);
        HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    }

    /* record address of new tuple in t_ctid of old one */
    oldtup.t_data->t_ctid = heaptup->t_self;

    if (newbuf != buffer)
        MarkBufferDirty(newbuf);
    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (!relation->rd_istemp)
    {
        XLogRecPtr  recptr = log_heap_update(relation, buffer, oldtup.t_self,
                                             newbuf, heaptup, false);

        if (newbuf != buffer)
        {
            PageSetLSN(BufferGetPage(newbuf), recptr);
            PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
        }
        PageSetLSN(BufferGetPage(buffer), recptr);
        PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
    }

    END_CRIT_SECTION();

    if (newbuf != buffer)
        LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    /*
     * Mark old tuple for invalidation from system caches at next command
     * boundary.  We have to do this before releasing the buffer because we
     * need to look at the contents of the tuple.
     */
    CacheInvalidateHeapTuple(relation, &oldtup);

    /* Now we can release the buffer(s) */
    if (newbuf != buffer)
        ReleaseBuffer(newbuf);
    ReleaseBuffer(buffer);

    /*
     * If new tuple is cachable, mark it for invalidation from the caches in
     * case we abort.  Note it is OK to do this after releasing the buffer,
     * because the heaptup data structure is all in local memory, not in the
     * shared buffer.
     */
    CacheInvalidateHeapTuple(relation, heaptup);

    /*
     * Release the lmgr tuple lock, if we had it.
     */
    if (have_tuple_lock)
        UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);

    pgstat_count_heap_update(relation, use_hot_update);

    /*
     * If heaptup is a private copy, release it.  Don't forget to copy t_self
     * back to the caller's image, too.
     */
    if (heaptup != newtup)
    {
        newtup->t_self = heaptup->t_self;
        heap_freetuple(heaptup);
    }

    bms_free(hot_attrs);

    return HeapTupleMayBeUpdated;
}
/*
 * Check if the specified attribute's value is same in both given tuples.
 * Subroutine for HeapSatisfiesHOTUpdate.
 */
static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
                       HeapTuple tup1, HeapTuple tup2)
{
    Datum       value1,
                value2;
    bool        isnull1,
                isnull2;
    Form_pg_attribute att;

    /*
     * If it's a whole-tuple reference, say "not equal".  It's not really
     * worth supporting this case, since it could only succeed after a no-op
     * update, which is hardly a case worth optimizing for.
     */
    if (attrnum == 0)
        return false;

    /*
     * Likewise, automatically say "not equal" for any system attribute other
     * than OID and tableOID; we cannot expect these to be consistent in a HOT
     * chain, or even to be set correctly yet in the new tuple.
     */
    if (attrnum < 0)
    {
        if (attrnum != ObjectIdAttributeNumber &&
            attrnum != TableOidAttributeNumber)
            return false;
    }

    /*
     * Extract the corresponding values.  XXX this is pretty inefficient if
     * there are many indexed columns.  Should HeapSatisfiesHOTUpdate do a
     * single heap_deform_tuple call on each tuple, instead?  But that doesn't
     * work for system columns ...
     */
    value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
    value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);

    /*
     * If one value is NULL and other is not, then they are certainly not
     * equal
     */
    if (isnull1 != isnull2)
        return false;

    /*
     * If both are NULL, they can be considered equal.
     */
    if (isnull1)
        return true;

    /*
     * We do simple binary comparison of the two datums.  This may be overly
     * strict because there can be multiple binary representations for the
     * same logical value.  But we should be OK as long as there are no false
     * positives.  Using a type-specific equality operator is messy because
     * there could be multiple notions of equality in different operator
     * classes; furthermore, we cannot safely invoke user-defined functions
     * while holding exclusive buffer lock.
     */
    if (attrnum <= 0)
    {
        /* The only allowed system columns are OIDs, so do this */
        return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
    }
    else
    {
        Assert(attrnum <= tupdesc->natts);
        att = tupdesc->attrs[attrnum - 1];
        return datumIsEqual(value1, value2, att->attbyval, att->attlen);
    }
}
/*
 * Check if the old and new tuples represent a HOT-safe update.  To be able
 * to do a HOT update, we must not have changed any columns used in index
 * definitions.
 *
 * The set of attributes to be checked is passed in (we dare not try to
 * compute it while holding exclusive buffer lock...)  NOTE that hot_attrs
 * is destructively modified!  That is OK since this is invoked at most once
 * by heap_update.
 *
 * Returns true if safe to do HOT update.
 */
static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
                       HeapTuple oldtup, HeapTuple newtup)
{
    int         attrnum;

    while ((attrnum = bms_first_member(hot_attrs)) >= 0)
    {
        /* Adjust for system attributes */
        attrnum += FirstLowInvalidHeapAttributeNumber;

        /* If the attribute value has changed, we can't do HOT update */
        if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
                                    oldtup, newtup))
            return false;
    }

    return true;
}
/*
 *  simple_heap_update - replace a tuple
 *
 * This routine may be used to update a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;

    result = heap_update(relation, otid, tup,
                         &update_ctid, &update_xmax,
                         GetCurrentCommandId(true), InvalidSnapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* Tuple was already updated in current command? */
            elog(ERROR, "tuple already updated by self");
            break;

        case HeapTupleMayBeUpdated:
            /* done successfully */
            break;

        case HeapTupleUpdated:
            elog(ERROR, "tuple concurrently updated");
            break;

        default:
            elog(ERROR, "unrecognized heap_update status: %u", result);
            break;
    }
}
/*
 *  heap_lock_tuple - lock a tuple in shared or exclusive mode
 *
 * Note that this acquires a buffer pin, which the caller must release.
 *
 * Input parameters:
 *  relation: relation containing tuple (caller must hold suitable lock)
 *  tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
 *  cid: current command ID (used for visibility test, and stored into
 *      tuple's cmax if lock is successful)
 *  mode: indicates if shared or exclusive tuple lock is desired
 *  nowait: if true, ereport rather than blocking if lock not available
 *
 * Output parameters:
 *  *tuple: all fields filled in
 *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
 *  *ctid: set to tuple's t_ctid, but only in failure cases
 *  *update_xmax: set to tuple's xmax, but only in failure cases
 *
 * Function result may be:
 *  HeapTupleMayBeUpdated: lock was successfully acquired
 *  HeapTupleSelfUpdated: lock failed because tuple updated by self
 *  HeapTupleUpdated: lock failed because tuple updated by other xact
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 *
 * NOTES: because the shared-memory lock table is of finite size, but users
 * could reasonably want to lock large numbers of tuples, we do not rely on
 * the standard lock manager to store tuple-level locks over the long term.
 * Instead, a tuple is marked as locked by setting the current transaction's
 * XID as its XMAX, and setting additional infomask bits to distinguish this
 * usage from the more normal case of having deleted the tuple.  When
 * multiple transactions concurrently share-lock a tuple, the first locker's
 * XID is replaced in XMAX with a MultiTransactionId representing the set of
 * XIDs currently holding share-locks.
 *
 * When it is necessary to wait for a tuple-level lock to be released, the
 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
 * contents of the tuple's XMAX.  However, that mechanism will release all
 * waiters concurrently, so there would be a race condition as to which
 * waiter gets the tuple, potentially leading to indefinite starvation of
 * some waiters.  The possibility of share-locking makes the problem much
 * worse --- a steady stream of share-lockers can easily block an exclusive
 * locker forever.  To provide more reliable semantics about who gets a
 * tuple-level lock first, we use the standard lock manager.  The protocol
 * for waiting for a tuple-level lock is really
 *      LockTuple()
 *      XactLockTableWait()
 *      mark tuple as locked by me
 *      UnlockTuple()
 * When there are multiple waiters, arbitration of who is to get the lock next
 * is provided by LockTuple().  However, at most one tuple-level lock will
 * be held or awaited per backend at any time, so we don't risk overflow
 * of the lock table.  Note that incoming share-lockers are required to
 * do LockTuple as well, if there is any conflict, to ensure that they don't
 * starve out waiting exclusive-lockers.  However, if there is not any active
 * conflict for a tuple, we don't incur any extra overhead.
 */
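/*
 * Illustrative sketch (not part of the original file): the waiting protocol
 * described above, written out for the exclusive-lock case.  This is a
 * simplified paraphrase of what heap_delete, heap_update and heap_lock_tuple
 * actually do; the xmax recheck and the multixact variants are omitted.
 *
 *      LockTuple(relation, &(tup.t_self), ExclusiveLock);  (queue up fairly)
 *      XactLockTableWait(xwait);                           (wait for the locker)
 *      LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *      ... recheck xmax; if it changed, start over, keeping the tuple lock ...
 *      ... set our own XID as xmax, with the appropriate infomask bits ...
 *      UnlockTuple(relation, &(tup.t_self), ExclusiveLock); (release lmgr lock)
 *
 * Only the lock-manager lock taken by LockTuple provides fair queuing; the
 * xmax/infomask marking on the tuple itself is what persists afterwards.
 */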
HTSU_Result
heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
                ItemPointer ctid, TransactionId *update_xmax,
                CommandId cid, LockTupleMode mode, bool nowait)
{
    HTSU_Result result;
    ItemPointer tid = &(tuple->t_self);
    ItemId      lp;
    Page        page;
    TransactionId xid;
    TransactionId xmax;
    uint16      old_infomask;
    uint16      new_infomask;
    LOCKMODE    tuple_lock_type;
    bool        have_tuple_lock = false;

    tuple_lock_type = (mode == LockTupleShared) ? ShareLock : ExclusiveLock;

    *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

    page = BufferGetPage(*buffer);
    lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    Assert(ItemIdIsNormal(lp));

    tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    tuple->t_len = ItemIdGetLength(lp);
    tuple->t_tableOid = RelationGetRelid(relation);

l3:
    result = HeapTupleSatisfiesUpdate(tuple->t_data, cid, *buffer);

    if (result == HeapTupleInvisible)
    {
        UnlockReleaseBuffer(*buffer);
        elog(ERROR, "attempted to lock invisible tuple");
    }
    else if (result == HeapTupleBeingUpdated)
    {
        TransactionId xwait;
        uint16      infomask;

        /* must copy state data before unlocking buffer */
        xwait = HeapTupleHeaderGetXmax(tuple->t_data);
        infomask = tuple->t_data->t_infomask;

        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);

        /*
         * If we wish to acquire share lock, and the tuple is already
         * share-locked by a multixact that includes any subtransaction of the
         * current top transaction, then we effectively hold the desired lock
         * already.  We *must* succeed without trying to take the tuple lock,
         * else we will deadlock against anyone waiting to acquire exclusive
         * lock.  We don't need to make any state changes in this case.
         */
        if (mode == LockTupleShared &&
            (infomask & HEAP_XMAX_IS_MULTI) &&
            MultiXactIdIsCurrent((MultiXactId) xwait))
        {
            Assert(infomask & HEAP_XMAX_SHARED_LOCK);
            /* Probably can't hold tuple lock here, but may as well check */
            if (have_tuple_lock)
                UnlockTuple(relation, tid, tuple_lock_type);
            return HeapTupleMayBeUpdated;
        }

        /*
         * Acquire tuple lock to establish our priority for the tuple.
         * LockTuple will release us when we are next-in-line for the tuple.
         * We must do this even if we are share-locking.
         *
         * If we are forced to "start over" below, we keep the tuple lock;
         * this arranges that we stay at the head of the line while rechecking
         * tuple state.
         */
        if (!have_tuple_lock)
        {
            if (nowait)
            {
                if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                    errmsg("could not obtain lock on row in relation \"%s\"",
                           RelationGetRelationName(relation))));
            }
            else
                LockTuple(relation, tid, tuple_lock_type);
            have_tuple_lock = true;
        }

        if (mode == LockTupleShared && (infomask & HEAP_XMAX_SHARED_LOCK))
        {
            /*
             * Acquiring sharelock when there's at least one sharelocker
             * already.  We need not wait for him/them to complete.
             */
            LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * Make sure it's still a shared lock, else start over.  (It's OK
             * if the ownership of the shared lock has changed, though.)
             */
            if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
                goto l3;
        }
        else if (infomask & HEAP_XMAX_IS_MULTI)
        {
            /* wait for multixact to end */
            if (nowait)
            {
                if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                    errmsg("could not obtain lock on row in relation \"%s\"",
                           RelationGetRelationName(relation))));
            }
            else
                MultiXactIdWait((MultiXactId) xwait);

            LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * If xwait had just locked the tuple then some other xact could
             * update this tuple before we get to this point.  Check for xmax
             * change, and start over if so.
             */
            if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
                                     xwait))
                goto l3;

            /*
             * You might think the multixact is necessarily done here, but not
             * so: it could have surviving members, namely our own xact or
             * other subxacts of this backend.  It is legal for us to lock the
             * tuple in either case, however.  We don't bother changing the
             * on-disk hint bits since we are about to overwrite the xmax
             * altogether.
             */
        }
        else
        {
            /* wait for regular transaction to end */
            if (nowait)
            {
                if (!ConditionalXactLockTableWait(xwait))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                    errmsg("could not obtain lock on row in relation \"%s\"",
                           RelationGetRelationName(relation))));
            }
            else
                XactLockTableWait(xwait);

            LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * xwait is done, but if xwait had just locked the tuple then some
             * other xact could update this tuple before we get to this point.
             * Check for xmax change, and start over if so.
             */
            if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
                                     xwait))
                goto l3;

            /* Otherwise check if it committed or aborted */
            UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
        }

        /*
         * We may lock if previous xmax aborted, or if it committed but only
         * locked the tuple without updating it.  The case where we didn't
         * wait because we are joining an existing shared lock is correctly
         * handled, too.
         */
        if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
                                         HEAP_IS_LOCKED))
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }

    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
        Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
        *ctid = tuple->t_data->t_ctid;
        *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data);
        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
        if (have_tuple_lock)
            UnlockTuple(relation, tid, tuple_lock_type);
        return result;
    }

    /*
     * We might already hold the desired lock (or stronger), possibly under a
     * different subtransaction of the current top transaction.  If so, there
     * is no need to change state or issue a WAL record.  We already handled
     * the case where this is true for xmax being a MultiXactId, so now check
     * for cases where it is a plain TransactionId.
     *
     * Note in particular that this covers the case where we already hold
     * exclusive lock on the tuple and the caller only wants shared lock.  It
     * would certainly not do to give up the exclusive lock.
     */
    xmax = HeapTupleHeaderGetXmax(tuple->t_data);
    old_infomask = tuple->t_data->t_infomask;

    if (!(old_infomask & (HEAP_XMAX_INVALID |
                          HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_IS_MULTI)) &&
        (mode == LockTupleShared ?
         (old_infomask & HEAP_IS_LOCKED) :
         (old_infomask & HEAP_XMAX_EXCL_LOCK)) &&
        TransactionIdIsCurrentTransactionId(xmax))
    {
        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
        /* Probably can't hold tuple lock here, but may as well check */
        if (have_tuple_lock)
            UnlockTuple(relation, tid, tuple_lock_type);
        return HeapTupleMayBeUpdated;
    }

    /*
     * Compute the new xmax and infomask to store into the tuple.  Note we do
     * not modify the tuple just yet, because that would leave it in the wrong
     * state if multixact.c elogs.
     */
    xid = GetCurrentTransactionId();

    new_infomask = old_infomask & ~(HEAP_XMAX_COMMITTED |
                                    HEAP_XMAX_INVALID |
                                    HEAP_XMAX_IS_MULTI |
                                    HEAP_IS_LOCKED |
                                    HEAP_MOVED);

    if (mode == LockTupleShared)
    {
        /*
         * If this is the first acquisition of a shared lock in the current
         * transaction, set my per-backend OldestMemberMXactId setting.  We can
         * be certain that the transaction will never become a member of any
         * older MultiXactIds than that.  (We have to do this even if we end
         * up just using our own TransactionId below, since some other backend
         * could incorporate our XID into a MultiXact immediately afterwards.)
         */
        MultiXactIdSetOldestMember();

        new_infomask |= HEAP_XMAX_SHARED_LOCK;

        /*
         * Check to see if we need a MultiXactId because there are multiple
         * lockers.
         *
         * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
         * the xmax was a MultiXactId but it was not running anymore.  There is
         * a race condition, which is that the MultiXactId may have finished
         * since then, but that uncommon case is handled within
         * MultiXactIdExpand.
         *
         * There is a similar race condition possible when the old xmax was a
         * regular TransactionId.  We test TransactionIdIsInProgress again
         * just to narrow the window, but it's still possible to end up
         * creating an unnecessary MultiXactId.  Fortunately this is harmless.
         */
        if (!(old_infomask & (HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED)))
        {
            if (old_infomask & HEAP_XMAX_IS_MULTI)
            {
                /*
                 * If the XMAX is already a MultiXactId, then we need to
                 * expand it to include our own TransactionId.
                 */
                xid = MultiXactIdExpand((MultiXactId) xmax, xid);
                new_infomask |= HEAP_XMAX_IS_MULTI;
            }
            else if (TransactionIdIsInProgress(xmax))
            {
                /*
                 * If the XMAX is a valid TransactionId, then we need to
                 * create a new MultiXactId that includes both the old locker
                 * and our own TransactionId.
                 */
                xid = MultiXactIdCreate(xmax, xid);
                new_infomask |= HEAP_XMAX_IS_MULTI;
            }
            else
            {
                /*
                 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
                 * as running, but it finished before
                 * TransactionIdIsInProgress() got to run.  Treat it like
                 * there's no locker in the tuple.
                 */
            }
        }
        else
        {
            /*
             * There was no previous locker, so just insert our own
             * TransactionId.
             */
        }
    }
    else
    {
        /* We want an exclusive lock on the tuple */
        new_infomask |= HEAP_XMAX_EXCL_LOCK;
    }

    START_CRIT_SECTION();

    /*
     * Store transaction information of xact locking the tuple.
     *
     * Note: Cmax is meaningless in this context, so don't set it; this avoids
     * possibly generating a useless combo CID.
     */
    tuple->t_data->t_infomask = new_infomask;
    HeapTupleHeaderClearHotUpdated(tuple->t_data);
    HeapTupleHeaderSetXmax(tuple->t_data, xid);
    /* Make sure there is no forward chain link in t_ctid */
    tuple->t_data->t_ctid = *tid;

    MarkBufferDirty(*buffer);

    /*
     * XLOG stuff.  You might think that we don't need an XLOG record because
     * there is no state change worth restoring after a crash.  You would be
     * wrong however: we have just written either a TransactionId or a
     * MultiXactId that may never have been seen on disk before, and we need
     * to make sure that there are XLOG entries covering those ID numbers.
     * Else the same IDs might be re-used after a crash, which would be
     * disastrous if this page made it to disk before the crash.  Essentially
     * we have to enforce the WAL log-before-data rule even in this case.
     * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
     * entries for everything anyway.)
     */
    if (!relation->rd_istemp)
    {
        xl_heap_lock xlrec;
        XLogRecPtr  recptr;
        XLogRecData rdata[2];

        xlrec.target.node = relation->rd_node;
        xlrec.target.tid = tuple->t_self;
        xlrec.locking_xid = xid;
        xlrec.xid_is_mxact = ((new_infomask & HEAP_XMAX_IS_MULTI) != 0);
        xlrec.shared_lock = (mode == LockTupleShared);
        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfHeapLock;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        rdata[1].data = NULL;
        rdata[1].len = 0;
        rdata[1].buffer = *buffer;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);

    /*
     * Now that we have successfully marked the tuple as locked, we can
     * release the lmgr tuple lock, if we had it.
     */
    if (have_tuple_lock)
        UnlockTuple(relation, tid, tuple_lock_type);

    return HeapTupleMayBeUpdated;
}
/*
 *  heap_inplace_update - update a tuple "in place" (ie, overwrite it)
 *
 * Overwriting violates both MVCC and transactional safety, so the uses
 * of this function in Postgres are extremely limited.  Nonetheless we
 * find some places to use it.
 *
 * The tuple cannot change size, and therefore it's reasonable to assume
 * that its null bitmap (if any) doesn't change either.  So we just
 * overwrite the data portion of the tuple without touching the null
 * bitmap or any of the header fields.
 *
 * tuple is an in-memory tuple structure containing the data to be written
 * over the target tuple.  Also, tuple->t_self identifies the target tuple.
 */
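/*
 * Illustrative sketch (not part of the original file): typical usage is to
 * copy an existing catalog tuple, scribble on the copy without changing its
 * length, and overwrite the original.  The tuple-fetch step is elided and
 * the particular field being modified is an assumption for the example.
 *
 *      HeapTuple     ctup = heap_copytuple(origtup);
 *      Form_pg_class pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *
 *      pgcform->relpages = num_pages;      (same-size, in-place change)
 *      heap_inplace_update(relation, ctup);
 *
 * Note that ctup->t_self must still identify the on-disk target tuple,
 * which heap_copytuple preserves.
 */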
void
heap_inplace_update(Relation relation, HeapTuple tuple)
{
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    uint32      newlen;

    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    page = (Page) BufferGetPage(buffer);

    offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(ERROR, "heap_inplace_update: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    newlen = tuple->t_len - tuple->t_data->t_hoff;
    if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
        elog(ERROR, "heap_inplace_update: wrong tuple length");

    /* NO EREPORT(ERROR) from here till changes are logged */
    START_CRIT_SECTION();

    memcpy((char *) htup + htup->t_hoff,
           (char *) tuple->t_data + tuple->t_data->t_hoff,
           newlen);

    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (!relation->rd_istemp)
    {
        xl_heap_inplace xlrec;
        XLogRecPtr  recptr;
        XLogRecData rdata[2];

        xlrec.target.node = relation->rd_node;
        xlrec.target.tid = tuple->t_self;

        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfHeapInplace;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        rdata[1].data = (char *) htup + htup->t_hoff;
        rdata[1].len = newlen;
        rdata[1].buffer = buffer;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    UnlockReleaseBuffer(buffer);

    /* Send out shared cache inval if necessary */
    if (!IsBootstrapProcessingMode())
        CacheInvalidateHeapTuple(relation, tuple);
}
/*
 * heap_freeze_tuple
 *
 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
 * are older than the specified cutoff XID.  If so, replace them with
 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
 * TRUE.  Return FALSE if nothing was changed.
 *
 * It is assumed that the caller has checked the tuple with
 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
 * (else we should be removing the tuple, not freezing it).
 *
 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
 * XID older than it could neither be running nor seen as running by any
 * open transaction.  This ensures that the replacement will not change
 * anyone's idea of the tuple state.  Also, since we assume the tuple is
 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
 * to assume that it is either committed good or aborted, as appropriate;
 * so we need no external state checks to decide what to do.  (This is good
 * because this function is applied during WAL recovery, when we don't have
 * access to any such state, and can't depend on the hint bits to be set.)
 *
 * In lazy VACUUM, we call this while initially holding only a shared lock
 * on the tuple's buffer.  If any change is needed, we trade that in for an
 * exclusive lock before making the change.  Caller should pass the buffer ID
 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
 *
 * Note: it might seem we could make the changes without exclusive lock, since
 * TransactionId read/write is assumed atomic anyway.  However there is a race
 * condition: someone who just fetched an old XID that we overwrite here could
 * conceivably not finish checking the XID against pg_clog before we finish
 * the VACUUM and perhaps truncate off the part of pg_clog he needs.  Getting
 * exclusive lock ensures no other backend is in process of checking the
 * tuple status.  Also, getting exclusive lock makes it safe to adjust the
 * infomask bits.
 */
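/*
 * Illustrative sketch (not part of the original file): how lazy VACUUM
 * might drive this function while initially holding only a share lock on
 * the buffer, collecting the offsets of tuples it actually froze so they
 * can be passed to log_heap_freeze afterwards.  The surrounding scan loop
 * and the frozen[]/nfrozen bookkeeping are assumptions for the example.
 *
 *      if (heap_freeze_tuple(tuple.t_data, FreezeLimit, buf))
 *          frozen[nfrozen++] = offnum;
 *      ...
 *      if (nfrozen > 0)
 *      {
 *          MarkBufferDirty(buf);
 *          if (!onerel->rd_istemp)
 *          {
 *              XLogRecPtr  recptr;
 *
 *              recptr = log_heap_freeze(onerel, buf, FreezeLimit,
 *                                       frozen, nfrozen);
 *              PageSetLSN(page, recptr);
 *              PageSetTLI(page, ThisTimeLineID);
 *          }
 *      }
 *
 * During WAL replay the same function is called with InvalidBuffer, since
 * the redo routine already holds exclusive lock (see heap_xlog_freeze).
 */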
bool
heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
                  Buffer buf)
{
    bool        changed = false;
    TransactionId xid;

    xid = HeapTupleHeaderGetXmin(tuple);
    if (TransactionIdIsNormal(xid) &&
        TransactionIdPrecedes(xid, cutoff_xid))
    {
        if (buf != InvalidBuffer)
        {
            /* trade in share lock for exclusive lock */
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
            buf = InvalidBuffer;
        }
        HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);

        /*
         * Might as well fix the hint bits too; usually XMIN_COMMITTED will
         * already be set here, but there's a small chance not.
         */
        Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
        tuple->t_infomask |= HEAP_XMIN_COMMITTED;
        changed = true;
    }

    /*
     * When we release shared lock, it's possible for someone else to change
     * xmax before we get the lock back, so repeat the check after acquiring
     * exclusive lock.  (We don't need this pushup for xmin, because only
     * VACUUM could be interested in changing an existing tuple's xmin, and
     * there's only one VACUUM allowed on a table at a time.)
     */
recheck_xmax:
    if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
    {
        xid = HeapTupleHeaderGetXmax(tuple);
        if (TransactionIdIsNormal(xid) &&
            TransactionIdPrecedes(xid, cutoff_xid))
        {
            if (buf != InvalidBuffer)
            {
                /* trade in share lock for exclusive lock */
                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
                buf = InvalidBuffer;
                goto recheck_xmax;      /* see comment above */
            }
            HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);

            /*
             * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
             * + LOCKED.  Normalize to INVALID just to be sure no one gets
             * confused.
             */
            tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
            tuple->t_infomask |= HEAP_XMAX_INVALID;
            HeapTupleHeaderClearHotUpdated(tuple);
            changed = true;
        }
    }
    else
    {
        /*
         * XXX perhaps someday we should zero out very old MultiXactIds here?
         *
         * The only way a stale MultiXactId could pose a problem is if a
         * tuple, having once been multiply-share-locked, is not touched by
         * any vacuum or attempted lock or deletion for just over 4G MultiXact
         * creations, and then in the probably-narrow window where its xmax
         * is again a live MultiXactId, someone tries to lock or delete it.
         * Even then, another share-lock attempt would work fine.  An
         * exclusive-lock or delete attempt would face unexpected delay, or
         * in the very worst case get a deadlock error.  This seems an
         * extremely low-probability scenario with minimal downside even if
         * it does happen, so for now we don't do the extra bookkeeping that
         * would be needed to clean out MultiXactIds.
         */
    }

    /*
     * Although xvac per se could only be set by VACUUM, it shares physical
     * storage space with cmax, and so could be wiped out by someone setting
     * xmax.  Hence recheck after changing lock, same as for xmax itself.
     */
recheck_xvac:
    if (tuple->t_infomask & HEAP_MOVED)
    {
        xid = HeapTupleHeaderGetXvac(tuple);
        if (TransactionIdIsNormal(xid) &&
            TransactionIdPrecedes(xid, cutoff_xid))
        {
            if (buf != InvalidBuffer)
            {
                /* trade in share lock for exclusive lock */
                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
                buf = InvalidBuffer;
                goto recheck_xvac;      /* see comment above */
            }

            /*
             * If a MOVED_OFF tuple is not dead, the xvac transaction must
             * have failed; whereas a non-dead MOVED_IN tuple must mean the
             * xvac transaction succeeded.
             */
            if (tuple->t_infomask & HEAP_MOVED_OFF)
                HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
            else
                HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);

            /*
             * Might as well fix the hint bits too; usually XMIN_COMMITTED
             * will already be set here, but there's a small chance not.
             */
            Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
            tuple->t_infomask |= HEAP_XMIN_COMMITTED;
            changed = true;
        }
    }

    return changed;
}
/*
 *  heap_markpos - mark scan position
 */
void
heap_markpos(HeapScanDesc scan)
{
    /* Note: no locking manipulations needed */

    if (scan->rs_ctup.t_data != NULL)
    {
        scan->rs_mctid = scan->rs_ctup.t_self;
        if (scan->rs_pageatatime)
            scan->rs_mindex = scan->rs_cindex;
    }
    else
        ItemPointerSetInvalid(&scan->rs_mctid);
}
/*
 *  heap_restrpos - restore position to marked location
 */
void
heap_restrpos(HeapScanDesc scan)
{
    /* XXX no amrestrpos checking that ammarkpos called */

    if (!ItemPointerIsValid(&scan->rs_mctid))
    {
        scan->rs_ctup.t_data = NULL;

        /*
         * unpin scan buffers
         */
        if (BufferIsValid(scan->rs_cbuf))
            ReleaseBuffer(scan->rs_cbuf);
        scan->rs_cbuf = InvalidBuffer;
        scan->rs_cblock = InvalidBlockNumber;
        scan->rs_inited = false;
    }
    else
    {
        /*
         * If we reached end of scan, rs_inited will now be false.  We must
         * reset it to true to keep heapgettup from doing the wrong thing.
         */
        scan->rs_inited = true;
        scan->rs_ctup.t_self = scan->rs_mctid;
        if (scan->rs_pageatatime)
        {
            scan->rs_cindex = scan->rs_mindex;
            heapgettup_pagemode(scan,
                                NoMovementScanDirection,
                                0,      /* needn't recheck scan keys */
                                NULL);
        }
        else
            heapgettup(scan,
                       NoMovementScanDirection,
                       0,       /* needn't recheck scan keys */
                       NULL);
    }
}
/*
 * Perform XLogInsert for a heap-clean operation.  Caller must already
 * have modified the buffer and marked it dirty.
 *
 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
 * zero-based tuple indexes.  Now they are one-based like other uses
 * of OffsetNumber.
 */
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer,
               OffsetNumber *redirected, int nredirected,
               OffsetNumber *nowdead, int ndead,
               OffsetNumber *nowunused, int nunused,
               bool redirect_move)
{
    xl_heap_clean xlrec;
    uint8       info;
    XLogRecPtr  recptr;
    XLogRecData rdata[4];

    /* Caller should not call me on a temp relation */
    Assert(!reln->rd_istemp);

    xlrec.node = reln->rd_node;
    xlrec.block = BufferGetBlockNumber(buffer);
    xlrec.nredirected = nredirected;
    xlrec.ndead = ndead;

    rdata[0].data = (char *) &xlrec;
    rdata[0].len = SizeOfHeapClean;
    rdata[0].buffer = InvalidBuffer;
    rdata[0].next = &(rdata[1]);

    /*
     * The OffsetNumber arrays are not actually in the buffer, but we pretend
     * that they are.  When XLogInsert stores the whole buffer, the offset
     * arrays need not be stored too.  Note that even if all three arrays are
     * empty, we want to expose the buffer as a candidate for whole-page
     * storage, since this record type implies a defragmentation operation
     * even if no item pointers changed state.
     */
    if (nredirected > 0)
    {
        rdata[1].data = (char *) redirected;
        rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
    }
    else
    {
        rdata[1].data = NULL;
        rdata[1].len = 0;
    }
    rdata[1].buffer = buffer;
    rdata[1].buffer_std = true;
    rdata[1].next = &(rdata[2]);

    if (ndead > 0)
    {
        rdata[2].data = (char *) nowdead;
        rdata[2].len = ndead * sizeof(OffsetNumber);
    }
    else
    {
        rdata[2].data = NULL;
        rdata[2].len = 0;
    }
    rdata[2].buffer = buffer;
    rdata[2].buffer_std = true;
    rdata[2].next = &(rdata[3]);

    if (nunused > 0)
    {
        rdata[3].data = (char *) nowunused;
        rdata[3].len = nunused * sizeof(OffsetNumber);
    }
    else
    {
        rdata[3].data = NULL;
        rdata[3].len = 0;
    }
    rdata[3].buffer = buffer;
    rdata[3].buffer_std = true;
    rdata[3].next = NULL;

    info = redirect_move ? XLOG_HEAP2_CLEAN_MOVE : XLOG_HEAP2_CLEAN;
    recptr = XLogInsert(RM_HEAP2_ID, info, rdata);

    return recptr;
}
/*
 * Perform XLogInsert for a heap-freeze operation.  Caller must already
 * have modified the buffer and marked it dirty.
 */
XLogRecPtr
log_heap_freeze(Relation reln, Buffer buffer,
                TransactionId cutoff_xid,
                OffsetNumber *offsets, int offcnt)
{
    xl_heap_freeze xlrec;
    XLogRecPtr  recptr;
    XLogRecData rdata[2];

    /* Caller should not call me on a temp relation */
    Assert(!reln->rd_istemp);

    xlrec.node = reln->rd_node;
    xlrec.block = BufferGetBlockNumber(buffer);
    xlrec.cutoff_xid = cutoff_xid;

    rdata[0].data = (char *) &xlrec;
    rdata[0].len = SizeOfHeapFreeze;
    rdata[0].buffer = InvalidBuffer;
    rdata[0].next = &(rdata[1]);

    /*
     * The tuple-offsets array is not actually in the buffer, but pretend that
     * it is.  When XLogInsert stores the whole buffer, the offsets array need
     * not be stored too.
     */
    if (offcnt > 0)
    {
        rdata[1].data = (char *) offsets;
        rdata[1].len = offcnt * sizeof(OffsetNumber);
    }
    else
    {
        rdata[1].data = NULL;
        rdata[1].len = 0;
    }
    rdata[1].buffer = buffer;
    rdata[1].buffer_std = true;
    rdata[1].next = NULL;

    recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);

    return recptr;
}
/*
 * Perform XLogInsert for a heap-update operation.  Caller must already
 * have modified the buffer(s) and marked them dirty.
 */
static XLogRecPtr
log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
                Buffer newbuf, HeapTuple newtup, bool move)
{
    /*
     * Note: xlhdr is declared to have adequate size and correct alignment for
     * an xl_heap_header.  However the two tids, if present at all, will be
     * packed in with no wasted space after the xl_heap_header; they aren't
     * necessarily aligned as implied by this struct declaration.
     */
    struct
    {
        xl_heap_header hdr;
        TransactionId tid1;
        TransactionId tid2;
    }           xlhdr;
    int         hsize = SizeOfHeapHeader;
    xl_heap_update xlrec;
    uint8       info;
    XLogRecPtr  recptr;
    XLogRecData rdata[4];
    Page        page = BufferGetPage(newbuf);

    /* Caller should not call me on a temp relation */
    Assert(!reln->rd_istemp);

    if (move)
    {
        Assert(!HeapTupleIsHeapOnly(newtup));
        info = XLOG_HEAP_MOVE;
    }
    else if (HeapTupleIsHeapOnly(newtup))
        info = XLOG_HEAP_HOT_UPDATE;
    else
        info = XLOG_HEAP_UPDATE;

    xlrec.target.node = reln->rd_node;
    xlrec.target.tid = from;
    xlrec.newtid = newtup->t_self;

    rdata[0].data = (char *) &xlrec;
    rdata[0].len = SizeOfHeapUpdate;
    rdata[0].buffer = InvalidBuffer;
    rdata[0].next = &(rdata[1]);

    rdata[1].data = NULL;
    rdata[1].len = 0;
    rdata[1].buffer = oldbuf;
    rdata[1].buffer_std = true;
    rdata[1].next = &(rdata[2]);

    xlhdr.hdr.t_infomask2 = newtup->t_data->t_infomask2;
    xlhdr.hdr.t_infomask = newtup->t_data->t_infomask;
    xlhdr.hdr.t_hoff = newtup->t_data->t_hoff;
    if (move)                   /* remember xmax & xmin */
    {
        TransactionId xid[2];   /* xmax, xmin */

        if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED))
            xid[0] = InvalidTransactionId;
        else
            xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
        xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
        memcpy((char *) &xlhdr + hsize,
               (char *) xid,
               2 * sizeof(TransactionId));
        hsize += 2 * sizeof(TransactionId);
    }

    /*
     * As with insert records, we need not store the rdata[2] segment if we
     * decide to store the whole buffer instead.
     */
    rdata[2].data = (char *) &xlhdr;
    rdata[2].len = hsize;
    rdata[2].buffer = newbuf;
    rdata[2].buffer_std = true;
    rdata[2].next = &(rdata[3]);

    /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
    rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
    rdata[3].buffer = newbuf;
    rdata[3].buffer_std = true;
    rdata[3].next = NULL;

    /* If new tuple is the single and first tuple on page... */
    if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
        PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    {
        info |= XLOG_HEAP_INIT_PAGE;
        rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
    }

    recptr = XLogInsert(RM_HEAP_ID, info, rdata);

    return recptr;
}
/*
 * Perform XLogInsert for a heap-move operation.  Caller must already
 * have modified the buffers and marked them dirty.
 */
XLogRecPtr
log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
              Buffer newbuf, HeapTuple newtup)
{
    return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
}
/*
 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL.  Caller is responsible
 * for writing the page to disk after calling this routine.
 *
 * Note: all current callers build pages in private memory and write them
 * directly to smgr, rather than using bufmgr.  Therefore there is no need
 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
 * the critical section.
 *
 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
 * not do anything that assumes we are touching a heap.
 */
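/*
 * Illustrative sketch (not part of the original file): a caller building a
 * new block in private memory would log it and then write it through smgr
 * itself, along the following lines.  The use_wal test and the smgrextend
 * call are assumptions modeled on callers such as the CLUSTER/COPY rewrite
 * paths; exact arguments may differ.  "page" is a palloc'd BLCKSZ buffer
 * already filled in by the caller.
 *
 *      if (use_wal)
 *          log_newpage(&rel->rd_node, MAIN_FORKNUM, blkno, page);
 *      smgrextend(rel->rd_smgr, MAIN_FORKNUM, blkno, (char *) page, true);
 *
 * As the header comment says, log_newpage does not mark any buffer dirty;
 * durability of the page itself is entirely the caller's responsibility.
 */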
XLogRecPtr
log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
            Page page)
{
    xl_heap_newpage xlrec;
    XLogRecPtr  recptr;
    XLogRecData rdata[2];

    /* NO ELOG(ERROR) from here till newpage op is logged */
    START_CRIT_SECTION();

    xlrec.node = *rnode;
    xlrec.forknum = forkNum;
    xlrec.blkno = blkno;

    rdata[0].data = (char *) &xlrec;
    rdata[0].len = SizeOfHeapNewpage;
    rdata[0].buffer = InvalidBuffer;
    rdata[0].next = &(rdata[1]);

    rdata[1].data = (char *) page;
    rdata[1].len = BLCKSZ;
    rdata[1].buffer = InvalidBuffer;
    rdata[1].next = NULL;

    recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);

    PageSetLSN(page, recptr);
    PageSetTLI(page, ThisTimeLineID);

    END_CRIT_SECTION();

    return recptr;
}
/*
 * Handles CLEAN and CLEAN_MOVE record types
 */
static void
heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
    xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber *redirected;
    OffsetNumber *nowdead;
    OffsetNumber *nowunused;
    OffsetNumber *end;
    int         nredirected;
    int         ndead;
    int         nunused;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    nredirected = xlrec->nredirected;
    ndead = xlrec->ndead;
    end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
    redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
    nowdead = redirected + (nredirected * 2);
    nowunused = nowdead + ndead;
    nunused = (end - nowunused);
    Assert(nunused >= 0);

    /* Update all item pointers per the record, and repair fragmentation */
    heap_page_prune_execute(buffer,
                            redirected, nredirected,
                            nowdead, ndead,
                            nowunused, nunused,
                            clean_move);

    /*
     * Note: we don't worry about updating the page's prunability hints.
     * At worst this will cause an extra prune cycle to occur soon.
     */

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
    TransactionId cutoff_xid = xlrec->cutoff_xid;
    Buffer      buffer;
    Page        page;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    if (record->xl_len > SizeOfHeapFreeze)
    {
        OffsetNumber *offsets;
        OffsetNumber *offsets_end;

        offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
        offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

        while (offsets < offsets_end)
        {
            /* offsets[] entries are one-based */
            ItemId      lp = PageGetItemId(page, *offsets);
            HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

            (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
            offsets++;
        }
    }

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;

    /*
     * Note: the NEWPAGE log record is used for both heaps and indexes, so do
     * not do anything that assumes we are touching a heap.
     */
    buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, true);
    Assert(BufferIsValid(buffer));
    page = (Page) BufferGetPage(buffer);

    Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
    memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_delete_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_INVALID |
                          HEAP_XMAX_IS_MULTI |
                          HEAP_IS_LOCKED |
                          HEAP_MOVED);
    HeapTupleHeaderClearHotUpdated(htup);
    HeapTupleHeaderSetXmax(htup, record->xl_xid);
    HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

    /* Mark the page as a candidate for pruning */
    PageSetPrunable(page, record->xl_xid);

    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->target.tid;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    xl_heap_header xlhdr;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                                true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                                false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_insert_redo: invalid max offset number");

    newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapInsert,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;
    HeapTupleHeaderSetXmin(htup, record->xl_xid);
    HeapTupleHeaderSetCmin(htup, FirstCommandId);
    htup->t_ctid = xlrec->target.tid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_insert_redo: failed to add tuple");
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
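
/*
 * For reference, the byte layout that the memcpy arithmetic in
 * heap_xlog_insert assumes (a sketch; the record is built by heap_insert,
 * earlier in this file):
 *
 *      xl_heap_insert                      (SizeOfHeapInsert)
 *      xl_heap_header                      (SizeOfHeapHeader)
 *      tuple data from t_bits onward       (xl_len minus the two headers)
 *
 * The redo routine rebuilds the fixed tuple header in local memory and only
 * copies the portion starting at t_bits out of the WAL record.
 */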

/*
 * Handles UPDATE, HOT_UPDATE & MOVE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
{
    xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    Buffer      buffer;
    bool        samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)));
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    xl_heap_header xlhdr;
    int         hsize;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
    {
        if (samepage)
            return;             /* backup block covered both changes */
        goto newt;
    }

    /* Deal with old tuple version */

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        goto newt;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        if (samepage)
            return;
        goto newt;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_update_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    if (move)
    {
        htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                              HEAP_XMIN_INVALID |
                              HEAP_MOVED_IN);
        htup->t_infomask |= HEAP_MOVED_OFF;
        HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
        /* Make sure there is no forward chain link in t_ctid */
        htup->t_ctid = xlrec->target.tid;
    }
    else
    {
        htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                              HEAP_XMAX_INVALID |
                              HEAP_XMAX_IS_MULTI |
                              HEAP_IS_LOCKED |
                              HEAP_MOVED);
        if (hot_update)
            HeapTupleHeaderSetHotUpdated(htup);
        else
            HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXmax(htup, record->xl_xid);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        /* Set forward chain link in t_ctid */
        htup->t_ctid = xlrec->newtid;
    }

    /* Mark the page as a candidate for pruning */
    PageSetPrunable(page, record->xl_xid);

    /*
     * this test is ugly, but necessary to avoid thinking that insert change
     * is already applied
     */
    if (samepage)
        goto newsame;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /* Deal with new tuple */

newt:;

    if (record->xl_info & XLR_BKP_BLOCK_2)
        return;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

newsame:;

    offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_update_redo: invalid max offset number");

    hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
    if (move)
        hsize += (2 * sizeof(TransactionId));

    newlen = record->xl_len - hsize;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapUpdate,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + hsize,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;

    if (move)
    {
        TransactionId xid[2];   /* xmax, xmin */

        memcpy((char *) xid,
               (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
               2 * sizeof(TransactionId));
        HeapTupleHeaderSetXmin(htup, xid[1]);
        HeapTupleHeaderSetXmax(htup, xid[0]);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
    }
    else
    {
        HeapTupleHeaderSetXmin(htup, record->xl_xid);
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
    }
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->newtid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_update_redo: failed to add tuple");
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_lock_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_INVALID |
                          HEAP_XMAX_IS_MULTI |
                          HEAP_IS_LOCKED |
                          HEAP_MOVED);
    if (xlrec->xid_is_mxact)
        htup->t_infomask |= HEAP_XMAX_IS_MULTI;
    if (xlrec->shared_lock)
        htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
    else
        htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
    HeapTupleHeaderClearHotUpdated(htup);
    HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
    HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->target.tid;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_inplace_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    newlen = record->xl_len - SizeOfHeapInplace;
    if (oldlen != newlen)
        elog(PANIC, "heap_inplace_redo: wrong tuple length");

    memcpy((char *) htup + htup->t_hoff,
           (char *) xlrec + SizeOfHeapInplace,
           newlen);

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}

void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP_INSERT:
            heap_xlog_insert(lsn, record);
            break;
        case XLOG_HEAP_DELETE:
            heap_xlog_delete(lsn, record);
            break;
        case XLOG_HEAP_UPDATE:
            heap_xlog_update(lsn, record, false, false);
            break;
        case XLOG_HEAP_MOVE:
            heap_xlog_update(lsn, record, true, false);
            break;
        case XLOG_HEAP_HOT_UPDATE:
            heap_xlog_update(lsn, record, false, true);
            break;
        case XLOG_HEAP_NEWPAGE:
            heap_xlog_newpage(lsn, record);
            break;
        case XLOG_HEAP_LOCK:
            heap_xlog_lock(lsn, record);
            break;
        case XLOG_HEAP_INPLACE:
            heap_xlog_inplace(lsn, record);
            break;
        default:
            elog(PANIC, "heap_redo: unknown op code %u", info);
    }
}

void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP2_FREEZE:
            heap_xlog_freeze(lsn, record);
            break;
        case XLOG_HEAP2_CLEAN:
            heap_xlog_clean(lsn, record, false);
            break;
        case XLOG_HEAP2_CLEAN_MOVE:
            heap_xlog_clean(lsn, record, true);
            break;
        default:
            elog(PANIC, "heap2_redo: unknown op code %u", info);
    }
}
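
/*
 * heap_redo/heap2_redo and the desc routines below are not called directly;
 * WAL replay reaches them through the resource-manager table (RmgrTable in
 * access/transam/rmgr.c).  The entries look roughly like this (a sketch
 * only; rmgr.c is authoritative):
 *
 *      {"Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL},
 *      {"Heap",  heap_redo,  heap_desc,  NULL, NULL, NULL},
 */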

static void
out_target(StringInfo buf, xl_heaptid *target)
{
    appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
            target->node.spcNode, target->node.dbNode, target->node.relNode,
                     ItemPointerGetBlockNumber(&(target->tid)),
                     ItemPointerGetOffsetNumber(&(target->tid)));
}

void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP_INSERT)
    {
        xl_heap_insert *xlrec = (xl_heap_insert *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "insert(init): ");
        else
            appendStringInfo(buf, "insert: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_DELETE)
    {
        xl_heap_delete *xlrec = (xl_heap_delete *) rec;

        appendStringInfo(buf, "delete: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "update(init): ");
        else
            appendStringInfo(buf, "update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_MOVE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "move(init): ");
        else
            appendStringInfo(buf, "move: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_HOT_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)      /* can this case happen? */
            appendStringInfo(buf, "hot_update(init): ");
        else
            appendStringInfo(buf, "hot_update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_NEWPAGE)
    {
        xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

        appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->blkno);
    }
    else if (info == XLOG_HEAP_LOCK)
    {
        xl_heap_lock *xlrec = (xl_heap_lock *) rec;

        if (xlrec->shared_lock)
            appendStringInfo(buf, "shared_lock: ");
        else
            appendStringInfo(buf, "exclusive_lock: ");
        if (xlrec->xid_is_mxact)
            appendStringInfo(buf, "mxid ");
        else
            appendStringInfo(buf, "xid ");
        appendStringInfo(buf, "%u ", xlrec->locking_xid);
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_INPLACE)
    {
        xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

        appendStringInfo(buf, "inplace: ");
        out_target(buf, &(xlrec->target));
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}

void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP2_FREEZE)
    {
        xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

        appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block,
                         xlrec->cutoff_xid);
    }
    else if (info == XLOG_HEAP2_CLEAN)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else if (info == XLOG_HEAP2_CLEAN_MOVE)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}

/*
 *  heap_sync       - sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and it's not a temp relation, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
    /* temp tables never need fsync */
    if (rel->rd_istemp)
        return;

    /* main heap */
    FlushRelationBuffers(rel);
    /* FlushRelationBuffers will have opened rd_smgr */
    smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);

    /* sync FSM as well */
    smgrimmedsync(rel->rd_smgr, FSM_FORKNUM);

    /* toast heap, if any */
    if (OidIsValid(rel->rd_rel->reltoastrelid))
    {
        Relation    toastrel;

        toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
        FlushRelationBuffers(toastrel);
        smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
        smgrimmedsync(toastrel->rd_smgr, FSM_FORKNUM);
        heap_close(toastrel, AccessShareLock);
    }
}
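
/*
 * A sketch of the expected calling pattern (COPY-style bulk loads are the
 * typical case; the use_wal flag and the loop below are placeholders, not
 * code from any particular caller): a command that inserts tuples without
 * WAL-logging them must finish with heap_sync before the transaction can
 * safely commit.
 *
 *      use_wal = XLogArchivingActive() ||
 *                <relation was not created/truncated in this transaction>;
 *      for each incoming row
 *          insert it, suppressing WAL logging when !use_wal;
 *      if (!use_wal)
 *          heap_sync(rel);
 */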