/*-------------------------------------------------------------------------
 *
 * heapam.c
 *	  heap access method code
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * INTERFACE ROUTINES
 *		relation_open	- open any relation by relation OID
 *		relation_openrv - open any relation specified by a RangeVar
 *		relation_close	- close any relation
 *		heap_open		- open a heap relation by relation OID
 *		heap_openrv		- open a heap relation specified by a RangeVar
 *		heap_close		- (now just a macro for relation_close)
 *		heap_beginscan	- begin relation scan
 *		heap_rescan		- restart a relation scan
 *		heap_endscan	- end relation scan
 *		heap_getnext	- retrieve next tuple in scan
 *		heap_fetch		- retrieve tuple with given tid
 *		heap_insert		- insert tuple into a relation
 *		heap_delete		- delete a tuple from a relation
 *		heap_update		- replace a tuple in a relation with another tuple
 *		heap_markpos	- mark scan position
 *		heap_restrpos	- restore position to marked location
 *		heap_sync		- sync heap, for when no WAL has been written
 *
 * NOTES
 *	  This file contains the heap_ routines which implement
 *	  the POSTGRES heap access method used for all POSTGRES
 *	  relations.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
/* GUC variable */
bool		synchronize_seqscans = true;


static HeapScanDesc heap_beginscan_internal(Relation relation,
						Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
				ItemPointerData from, Buffer newbuf, HeapTuple newtup,
				bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
				HeapTuple oldtup, HeapTuple newtup);
/* ----------------------------------------------------------------
 *						 heap support routines
 * ----------------------------------------------------------------
 */

/* ----------------
 *		initscan - scan code common to heap_beginscan and heap_rescan
 * ----------------
 */
static void
initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
{
    bool        allow_strat;
    bool        allow_sync;

    /*
     * Determine the number of blocks we have to scan.
     *
     * It is sufficient to do this once at scan start, since any tuples added
     * while the scan is in progress will be invisible to my snapshot anyway.
     * (That is not true when using a non-MVCC snapshot.  However, we couldn't
     * guarantee to return tuples added after scan start anyway, since they
     * might go into pages we already scanned.  To guarantee consistent
     * results for a non-MVCC snapshot, the caller must hold some higher-level
     * lock that ensures the interesting tuple(s) won't change.)
     */
    scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);

    /*
     * If the table is large relative to NBuffers, use a bulk-read access
     * strategy and enable synchronized scanning (see syncscan.c).  Although
     * the thresholds for these features could be different, we make them the
     * same so that there are only two behaviors to tune rather than four.
     * (However, some callers need to be able to disable one or both of these
     * behaviors, independently of the size of the table; also there is a GUC
     * variable that can disable synchronized scanning.)
     *
     * During a rescan, don't make a new strategy object if we don't have to.
     */
    if (!scan->rs_rd->rd_istemp &&
        scan->rs_nblocks > NBuffers / 4)
    {
        allow_strat = scan->rs_allow_strat;
        allow_sync = scan->rs_allow_sync;
    }
    else
        allow_strat = allow_sync = false;

    if (allow_strat)
    {
        if (scan->rs_strategy == NULL)
            scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
    }
    else
    {
        if (scan->rs_strategy != NULL)
            FreeAccessStrategy(scan->rs_strategy);
        scan->rs_strategy = NULL;
    }

    if (is_rescan)
    {
        /*
         * If rescan, keep the previous startblock setting so that rewinding a
         * cursor doesn't generate surprising results.  Reset the syncscan
         * setting, though.
         */
        scan->rs_syncscan = (allow_sync && synchronize_seqscans);
    }
    else if (allow_sync && synchronize_seqscans)
    {
        scan->rs_syncscan = true;
        scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
    }
    else
    {
        scan->rs_syncscan = false;
        scan->rs_startblock = 0;
    }

    scan->rs_inited = false;
    scan->rs_ctup.t_data = NULL;
    ItemPointerSetInvalid(&scan->rs_ctup.t_self);
    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;

    /* we don't have a marked position... */
    ItemPointerSetInvalid(&(scan->rs_mctid));

    /* page-at-a-time fields are always invalid when not rs_inited */

    /*
     * copy the scan key, if appropriate
     */
    if (key != NULL)
        memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));

    /*
     * Currently, we don't have a stats counter for bitmap heap scans (but the
     * underlying bitmap index scans will be counted).
     */
    if (!scan->rs_bitmapscan)
        pgstat_count_heap_scan(scan->rs_rd);
}
/*
 * heapgetpage - subroutine for heapgettup()
 *
 * This routine reads and pins the specified page of the relation.
 * In page-at-a-time mode it performs additional work, namely determining
 * which tuples on the page are visible.
 */
static void
heapgetpage(HeapScanDesc scan, BlockNumber page)
{
    Buffer      buffer;
    Snapshot    snapshot;
    Page        dp;
    int         lines;
    int         ntup;
    OffsetNumber lineoff;
    ItemId      lpp;
    bool        all_visible;

    Assert(page < scan->rs_nblocks);

    /* release previous scan buffer, if any */
    if (BufferIsValid(scan->rs_cbuf))
    {
        ReleaseBuffer(scan->rs_cbuf);
        scan->rs_cbuf = InvalidBuffer;
    }

    /* read page using selected strategy */
    scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
                                       RBM_NORMAL, scan->rs_strategy);
    scan->rs_cblock = page;

    if (!scan->rs_pageatatime)
        return;

    buffer = scan->rs_cbuf;
    snapshot = scan->rs_snapshot;

    /*
     * Prune and repair fragmentation for the whole page, if possible.
     */
    Assert(TransactionIdIsValid(RecentGlobalXmin));
    heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);

    /*
     * We must hold share lock on the buffer content while examining tuple
     * visibility.  Afterwards, however, the tuples we have found to be
     * visible are guaranteed good as long as we hold the buffer pin.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);

    dp = (Page) BufferGetPage(buffer);
    lines = PageGetMaxOffsetNumber(dp);
    ntup = 0;

    /*
     * If the all-visible flag indicates that all tuples on the page are
     * visible to everyone, we can skip the per-tuple visibility tests.
     */
    all_visible = PageIsAllVisible(dp);

    for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
         lineoff <= lines;
         lineoff++, lpp++)
    {
        if (ItemIdIsNormal(lpp))
        {
            bool        valid;

            if (all_visible)
                valid = true;
            else
            {
                HeapTupleData loctup;

                loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
                loctup.t_len = ItemIdGetLength(lpp);
                ItemPointerSet(&(loctup.t_self), page, lineoff);

                valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
            }
            if (valid)
                scan->rs_vistuples[ntup++] = lineoff;
        }
    }

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    Assert(ntup <= MaxHeapTuplesPerPage);
    scan->rs_ntuples = ntup;
}
/* ----------------
 *		heapgettup - fetch next heap tuple
 *
 *		Initialize the scan if not already done; then advance to the next
 *		tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
 *		or set scan->rs_ctup.t_data = NULL if no more tuples.
 *
 *		dir == NoMovementScanDirection means "re-fetch the tuple indicated
 *		by scan->rs_ctup".
 *
 * Note: the reason nkeys/key are passed separately, even though they are
 * kept in the scan descriptor, is that the caller may not want us to check
 * the scankeys.
 *
 * Note: when we fall off the end of the scan in either direction, we
 * reset rs_inited.  This means that a further request with the same
 * scan direction will restart the scan, which is a bit odd, but a
 * request with the opposite scan direction will start a fresh scan
 * in the proper direction.  The latter is required behavior for cursors,
 * while the former case is generally undefined behavior in Postgres
 * so we don't care too much.
 * ----------------
 */
static void
heapgettup(HeapScanDesc scan,
           ScanDirection dir,
           int nkeys,
           ScanKey key)
{
    HeapTuple   tuple = &(scan->rs_ctup);
    Snapshot    snapshot = scan->rs_snapshot;
    bool        backward = ScanDirectionIsBackward(dir);
    BlockNumber page;
    bool        finished;
    Page        dp;
    int         lines;
    OffsetNumber lineoff;
    int         linesleft;
    ItemId      lpp;

    /*
     * calculate next starting lineoff, given scan direction
     */
    if (ScanDirectionIsForward(dir))
    {
        if (!scan->rs_inited)
        {
            /*
             * return null immediately if relation is empty
             */
            if (scan->rs_nblocks == 0)
            {
                Assert(!BufferIsValid(scan->rs_cbuf));
                tuple->t_data = NULL;
                return;
            }
            page = scan->rs_startblock; /* first page */
            heapgetpage(scan, page);
            lineoff = FirstOffsetNumber;        /* first offnum */
            scan->rs_inited = true;
        }
        else
        {
            /* continue from previously returned page/tuple */
            page = scan->rs_cblock;     /* current page */
            lineoff =                   /* next offnum */
                OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
        }

        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = PageGetMaxOffsetNumber(dp);
        /* page and lineoff now reference the physically next tid */

        linesleft = lines - lineoff + 1;
    }
    else if (backward)
    {
        if (!scan->rs_inited)
        {
            /*
             * return null immediately if relation is empty
             */
            if (scan->rs_nblocks == 0)
            {
                Assert(!BufferIsValid(scan->rs_cbuf));
                tuple->t_data = NULL;
                return;
            }

            /*
             * Disable reporting to syncscan logic in a backwards scan; it's
             * not very likely anyone else is doing the same thing at the same
             * time, and much more likely that we'll just bollix things for
             * forward scanners.
             */
            scan->rs_syncscan = false;
            /* start from last page of the scan */
            if (scan->rs_startblock > 0)
                page = scan->rs_startblock - 1;
            else
                page = scan->rs_nblocks - 1;
            heapgetpage(scan, page);
        }
        else
        {
            /* continue from previously returned page/tuple */
            page = scan->rs_cblock;     /* current page */
        }

        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = PageGetMaxOffsetNumber(dp);

        if (!scan->rs_inited)
        {
            lineoff = lines;            /* final offnum */
            scan->rs_inited = true;
        }
        else
        {
            lineoff =                   /* previous offnum */
                OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
        }
        /* page and lineoff now reference the physically previous tid */

        linesleft = lineoff;
    }
    else
    {
        /*
         * ``no movement'' scan direction: refetch prior tuple
         */
        if (!scan->rs_inited)
        {
            Assert(!BufferIsValid(scan->rs_cbuf));
            tuple->t_data = NULL;
            return;
        }

        page = ItemPointerGetBlockNumber(&(tuple->t_self));
        if (page != scan->rs_cblock)
            heapgetpage(scan, page);

        /* Since the tuple was previously fetched, needn't lock page here */
        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
        lpp = PageGetItemId(dp, lineoff);
        Assert(ItemIdIsNormal(lpp));

        tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
        tuple->t_len = ItemIdGetLength(lpp);

        return;
    }

    /*
     * advance the scan until we find a qualifying tuple or run out of stuff
     * to scan
     */
    lpp = PageGetItemId(dp, lineoff);
    for (;;)
    {
        while (linesleft > 0)
        {
            if (ItemIdIsNormal(lpp))
            {
                bool        valid;

                tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
                tuple->t_len = ItemIdGetLength(lpp);
                ItemPointerSet(&(tuple->t_self), page, lineoff);

                /*
                 * if current tuple qualifies, return it.
                 */
                valid = HeapTupleSatisfiesVisibility(tuple,
                                                     snapshot,
                                                     scan->rs_cbuf);

                if (valid && key != NULL)
                    HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
                                nkeys, key, valid);

                if (valid)
                {
                    LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
                    return;
                }
            }

            /*
             * otherwise move to the next item on the page
             */
            --linesleft;
            if (backward)
            {
                --lpp;          /* move back in this page's ItemId array */
                --lineoff;
            }
            else
            {
                ++lpp;          /* move forward in this page's ItemId array */
                ++lineoff;
            }
        }

        /*
         * if we get here, it means we've exhausted the items on this page and
         * it's time to move to the next.
         */
        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

        /*
         * advance to next/prior page and detect end of scan
         */
        if (backward)
        {
            finished = (page == scan->rs_startblock);
            if (page == 0)
                page = scan->rs_nblocks;
            page--;
        }
        else
        {
            page++;
            if (page >= scan->rs_nblocks)
                page = 0;
            finished = (page == scan->rs_startblock);

            /*
             * Report our new scan position for synchronization purposes. We
             * don't do that when moving backwards, however. That would just
             * mess up any other forward-moving scanners.
             *
             * Note: we do this before checking for end of scan so that the
             * final state of the position hint is back at the start of the
             * rel.  That's not strictly necessary, but otherwise when you run
             * the same query multiple times the starting position would shift
             * a little bit backwards on every invocation, which is confusing.
             * We don't guarantee any specific ordering in general, though.
             */
            if (scan->rs_syncscan)
                ss_report_location(scan->rs_rd, page);
        }

        /*
         * return NULL if we've exhausted all the pages
         */
        if (finished)
        {
            if (BufferIsValid(scan->rs_cbuf))
                ReleaseBuffer(scan->rs_cbuf);
            scan->rs_cbuf = InvalidBuffer;
            scan->rs_cblock = InvalidBlockNumber;
            tuple->t_data = NULL;
            scan->rs_inited = false;
            return;
        }

        heapgetpage(scan, page);

        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = PageGetMaxOffsetNumber((Page) dp);
        linesleft = lines;
        if (backward)
        {
            lineoff = lines;
            lpp = PageGetItemId(dp, lines);
        }
        else
        {
            lineoff = FirstOffsetNumber;
            lpp = PageGetItemId(dp, FirstOffsetNumber);
        }
    }
}
/* ----------------
 *		heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *		Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heapgetpage), and we iterate through just the tuples listed
 * in rs_vistuples[] rather than all tuples on the page.  Notice that
 * lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 * ----------------
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
                    ScanDirection dir,
                    int nkeys,
                    ScanKey key)
{
    HeapTuple   tuple = &(scan->rs_ctup);
    bool        backward = ScanDirectionIsBackward(dir);
    BlockNumber page;
    bool        finished;
    Page        dp;
    int         lines;
    int         lineindex;
    OffsetNumber lineoff;
    int         linesleft;
    ItemId      lpp;

    /*
     * calculate next starting lineindex, given scan direction
     */
    if (ScanDirectionIsForward(dir))
    {
        if (!scan->rs_inited)
        {
            /*
             * return null immediately if relation is empty
             */
            if (scan->rs_nblocks == 0)
            {
                Assert(!BufferIsValid(scan->rs_cbuf));
                tuple->t_data = NULL;
                return;
            }
            page = scan->rs_startblock; /* first page */
            heapgetpage(scan, page);
            lineindex = 0;
            scan->rs_inited = true;
        }
        else
        {
            /* continue from previously returned page/tuple */
            page = scan->rs_cblock;     /* current page */
            lineindex = scan->rs_cindex + 1;
        }

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = scan->rs_ntuples;
        /* page and lineindex now reference the next visible tid */

        linesleft = lines - lineindex;
    }
    else if (backward)
    {
        if (!scan->rs_inited)
        {
            /*
             * return null immediately if relation is empty
             */
            if (scan->rs_nblocks == 0)
            {
                Assert(!BufferIsValid(scan->rs_cbuf));
                tuple->t_data = NULL;
                return;
            }

            /*
             * Disable reporting to syncscan logic in a backwards scan; it's
             * not very likely anyone else is doing the same thing at the same
             * time, and much more likely that we'll just bollix things for
             * forward scanners.
             */
            scan->rs_syncscan = false;
            /* start from last page of the scan */
            if (scan->rs_startblock > 0)
                page = scan->rs_startblock - 1;
            else
                page = scan->rs_nblocks - 1;
            heapgetpage(scan, page);
        }
        else
        {
            /* continue from previously returned page/tuple */
            page = scan->rs_cblock;     /* current page */
        }

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = scan->rs_ntuples;

        if (!scan->rs_inited)
        {
            lineindex = lines - 1;
            scan->rs_inited = true;
        }
        else
        {
            lineindex = scan->rs_cindex - 1;
        }
        /* page and lineindex now reference the previous visible tid */

        linesleft = lineindex + 1;
    }
    else
    {
        /*
         * ``no movement'' scan direction: refetch prior tuple
         */
        if (!scan->rs_inited)
        {
            Assert(!BufferIsValid(scan->rs_cbuf));
            tuple->t_data = NULL;
            return;
        }

        page = ItemPointerGetBlockNumber(&(tuple->t_self));
        if (page != scan->rs_cblock)
            heapgetpage(scan, page);

        /* Since the tuple was previously fetched, needn't lock page here */
        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
        lpp = PageGetItemId(dp, lineoff);
        Assert(ItemIdIsNormal(lpp));

        tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
        tuple->t_len = ItemIdGetLength(lpp);

        /* check that rs_cindex is in sync */
        Assert(scan->rs_cindex < scan->rs_ntuples);
        Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);

        return;
    }

    /*
     * advance the scan until we find a qualifying tuple or run out of stuff
     * to scan
     */
    for (;;)
    {
        while (linesleft > 0)
        {
            lineoff = scan->rs_vistuples[lineindex];
            lpp = PageGetItemId(dp, lineoff);
            Assert(ItemIdIsNormal(lpp));

            tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
            tuple->t_len = ItemIdGetLength(lpp);
            ItemPointerSet(&(tuple->t_self), page, lineoff);

            /*
             * if current tuple qualifies, return it.
             */
            if (key != NULL)
            {
                bool        valid;

                HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
                            nkeys, key, valid);
                if (valid)
                {
                    scan->rs_cindex = lineindex;
                    return;
                }
            }
            else
            {
                scan->rs_cindex = lineindex;
                return;
            }

            /*
             * otherwise move to the next item on the page
             */
            --linesleft;
            if (backward)
                --lineindex;
            else
                ++lineindex;
        }

        /*
         * if we get here, it means we've exhausted the items on this page and
         * it's time to move to the next.
         */
        if (backward)
        {
            finished = (page == scan->rs_startblock);
            if (page == 0)
                page = scan->rs_nblocks;
            page--;
        }
        else
        {
            page++;
            if (page >= scan->rs_nblocks)
                page = 0;
            finished = (page == scan->rs_startblock);

            /*
             * Report our new scan position for synchronization purposes. We
             * don't do that when moving backwards, however. That would just
             * mess up any other forward-moving scanners.
             *
             * Note: we do this before checking for end of scan so that the
             * final state of the position hint is back at the start of the
             * rel.  That's not strictly necessary, but otherwise when you run
             * the same query multiple times the starting position would shift
             * a little bit backwards on every invocation, which is confusing.
             * We don't guarantee any specific ordering in general, though.
             */
            if (scan->rs_syncscan)
                ss_report_location(scan->rs_rd, page);
        }

        /*
         * return NULL if we've exhausted all the pages
         */
        if (finished)
        {
            if (BufferIsValid(scan->rs_cbuf))
                ReleaseBuffer(scan->rs_cbuf);
            scan->rs_cbuf = InvalidBuffer;
            scan->rs_cblock = InvalidBlockNumber;
            tuple->t_data = NULL;
            scan->rs_inited = false;
            return;
        }

        heapgetpage(scan, page);

        dp = (Page) BufferGetPage(scan->rs_cbuf);
        lines = scan->rs_ntuples;
        linesleft = lines;
        if (backward)
            lineindex = lines - 1;
        else
            lineindex = 0;
    }
}
#if defined(DISABLE_COMPLEX_MACRO)
/*
 * This is formatted so oddly so that the correspondence to the macro
 * definition in access/heapam.h is maintained.
 */
Datum
fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
            bool *isnull)
{
    return (
            (attnum) > 0 ?
            (
             ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
             HeapTupleNoNulls(tup) ?
             (
              (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
              (
               fetchatt((tupleDesc)->attrs[(attnum) - 1],
                        (char *) (tup)->t_data + (tup)->t_data->t_hoff +
                        (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
               )
              :
              nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
              )
             :
             (
              att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
              (
               ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
               (Datum) NULL
               )
              :
              (
               nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
               )
              )
             )
            :
            (
             (Datum) NULL
             )
        );
}
#endif   /* defined(DISABLE_COMPLEX_MACRO) */
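
/*
 * Example (not part of the original file): a minimal sketch of pulling one
 * attribute out of a heap tuple with fastgetattr().  The helper name, the
 * EXAMPLE_USAGE_SKETCH guard, and the int32 column type are hypothetical;
 * the caller is assumed to supply a valid tuple and matching TupleDesc.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static int32
example_get_int32_attr(HeapTuple tup, TupleDesc tupdesc, int attnum)
{
    bool        isnull;
    Datum       d;

    /* fetch attribute "attnum" (1-based); isnull reports SQL NULL */
    d = fastgetattr(tup, attnum, tupdesc, &isnull);
    if (isnull)
        return 0;               /* caller-defined treatment of NULL */
    return DatumGetInt32(d);
}
#endif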
/* ----------------------------------------------------------------
 *					 heap access method interface
 * ----------------------------------------------------------------
 */

/* ----------------
 *		relation_open - open any relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the relation.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		relation already.)
 *
 *		An error is raised if the relation does not exist.
 *
 *		NB: a "relation" is anything with a pg_class entry.  The caller is
 *		expected to check whether the relkind is something it can handle.
 * ----------------
 */
Relation
relation_open(Oid relationId, LOCKMODE lockmode)
{
    Relation    r;

    Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

    /* Get the lock before trying to open the relcache entry */
    if (lockmode != NoLock)
        LockRelationOid(relationId, lockmode);

    /* The relcache does all the real work... */
    r = RelationIdGetRelation(relationId);

    if (!RelationIsValid(r))
        elog(ERROR, "could not open relation with OID %u", relationId);

    /* Make note that we've accessed a temporary relation */
    if (r->rd_istemp)
        MyXactAccessedTempRel = true;

    return r;
}
/* ----------------
 *		try_relation_open - open any relation by relation OID
 *
 *		Same as relation_open, except return NULL instead of failing
 *		if the relation does not exist.
 * ----------------
 */
Relation
try_relation_open(Oid relationId, LOCKMODE lockmode)
{
    Relation    r;

    Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

    /* Get the lock first */
    if (lockmode != NoLock)
        LockRelationOid(relationId, lockmode);

    /*
     * Now that we have the lock, probe to see if the relation really exists
     * or not.
     */
    if (!SearchSysCacheExists(RELOID,
                              ObjectIdGetDatum(relationId),
                              0, 0, 0))
    {
        /* Release useless lock */
        if (lockmode != NoLock)
            UnlockRelationOid(relationId, lockmode);

        return NULL;
    }

    /* Should be safe to do a relcache load */
    r = RelationIdGetRelation(relationId);

    if (!RelationIsValid(r))
        elog(ERROR, "could not open relation with OID %u", relationId);

    /* Make note that we've accessed a temporary relation */
    if (r->rd_istemp)
        MyXactAccessedTempRel = true;

    return r;
}
/* ----------------
 *		relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_open, but the relation is specified by a RangeVar.
 * ----------------
 */
Relation
relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
    Oid         relOid;

    /*
     * Check for shared-cache-inval messages before trying to open the
     * relation.  This is needed to cover the case where the name identifies a
     * rel that has been dropped and recreated since the start of our
     * transaction: if we don't flush the old syscache entry then we'll latch
     * onto that entry and suffer an error when we do RelationIdGetRelation.
     * Note that relation_open does not need to do this, since a relation's
     * OID never changes.
     *
     * We skip this if asked for NoLock, on the assumption that the caller has
     * already ensured some appropriate lock is held.
     */
    if (lockmode != NoLock)
        AcceptInvalidationMessages();

    /* Look up the appropriate relation using namespace search */
    relOid = RangeVarGetRelid(relation, false);

    /* Let relation_open do the rest */
    return relation_open(relOid, lockmode);
}
/* ----------------
 *		try_relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_openrv, but return NULL instead of failing for
 *		relation-not-found.  (Note that some other causes, such as
 *		permissions problems, will still result in an ereport.)
 * ----------------
 */
Relation
try_relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
    Oid         relOid;

    /*
     * Check for shared-cache-inval messages before trying to open the
     * relation.  This is needed to cover the case where the name identifies a
     * rel that has been dropped and recreated since the start of our
     * transaction: if we don't flush the old syscache entry then we'll latch
     * onto that entry and suffer an error when we do RelationIdGetRelation.
     * Note that relation_open does not need to do this, since a relation's
     * OID never changes.
     *
     * We skip this if asked for NoLock, on the assumption that the caller has
     * already ensured some appropriate lock is held.
     */
    if (lockmode != NoLock)
        AcceptInvalidationMessages();

    /* Look up the appropriate relation using namespace search */
    relOid = RangeVarGetRelid(relation, true);

    /* Return NULL on not-found */
    if (!OidIsValid(relOid))
        return NULL;

    /* Let relation_open do the rest */
    return relation_open(relOid, lockmode);
}
/* ----------------
 *		relation_close - close any relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond relation_close;
 *		in that case, the lock is released automatically at xact end.
 * ----------------
 */
void
relation_close(Relation relation, LOCKMODE lockmode)
{
    LockRelId   relid = relation->rd_lockInfo.lockRelId;

    Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

    /* The relcache does the real work... */
    RelationClose(relation);

    if (lockmode != NoLock)
        UnlockRelationId(&relid, lockmode);
}
/* ----------------
 *		heap_open - open a heap relation by relation OID
 *
 *		This is essentially relation_open plus check that the relation
 *		is not an index nor a composite type.  (The caller should also
 *		check that it's not a view before assuming it has storage.)
 * ----------------
 */
Relation
heap_open(Oid relationId, LOCKMODE lockmode)
{
    Relation    r;

    r = relation_open(relationId, lockmode);

    if (r->rd_rel->relkind == RELKIND_INDEX)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("\"%s\" is an index",
                        RelationGetRelationName(r))));
    else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("\"%s\" is a composite type",
                        RelationGetRelationName(r))));

    return r;
}
/* ----------------
 *		heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but relation is specified by a RangeVar.
 * ----------------
 */
Relation
heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
    Relation    r;

    r = relation_openrv(relation, lockmode);

    if (r->rd_rel->relkind == RELKIND_INDEX)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("\"%s\" is an index",
                        RelationGetRelationName(r))));
    else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("\"%s\" is a composite type",
                        RelationGetRelationName(r))));

    return r;
}
/* ----------------
 *		try_heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but return NULL instead of failing for relation-not-found.
 * ----------------
 */
Relation
try_heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
    Relation    r;

    r = try_relation_openrv(relation, lockmode);

    if (r)
    {
        if (r->rd_rel->relkind == RELKIND_INDEX)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is an index",
                            RelationGetRelationName(r))));
        else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a composite type",
                            RelationGetRelationName(r))));
    }

    return r;
}
/* ----------------
 *		heap_beginscan	- begin relation scan
 *
 * heap_beginscan_strat offers an extended API that lets the caller control
 * whether a nondefault buffer access strategy can be used, and whether
 * syncscan can be chosen (possibly resulting in the scan not starting from
 * block zero).  Both of these default to TRUE with plain heap_beginscan.
 *
 * heap_beginscan_bm is an alternative entry point for setting up a
 * HeapScanDesc for a bitmap heap scan.  Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality
 * to make it worth using the same data structure.
 * ----------------
 */
HeapScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
               int nkeys, ScanKey key)
{
    return heap_beginscan_internal(relation, snapshot, nkeys, key,
                                   true, true, false);
}

HeapScanDesc
heap_beginscan_strat(Relation relation, Snapshot snapshot,
                     int nkeys, ScanKey key,
                     bool allow_strat, bool allow_sync)
{
    return heap_beginscan_internal(relation, snapshot, nkeys, key,
                                   allow_strat, allow_sync, false);
}

HeapScanDesc
heap_beginscan_bm(Relation relation, Snapshot snapshot,
                  int nkeys, ScanKey key)
{
    return heap_beginscan_internal(relation, snapshot, nkeys, key,
                                   false, false, true);
}
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
                        int nkeys, ScanKey key,
                        bool allow_strat, bool allow_sync,
                        bool is_bitmapscan)
{
    HeapScanDesc scan;

    /*
     * increment relation ref count while scanning relation
     *
     * This is just to make really sure the relcache entry won't go away while
     * the scan has a pointer to it.  Caller should be holding the rel open
     * anyway, so this is redundant in all normal scenarios...
     */
    RelationIncrementReferenceCount(relation);

    /*
     * allocate and initialize scan descriptor
     */
    scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));

    scan->rs_rd = relation;
    scan->rs_snapshot = snapshot;
    scan->rs_nkeys = nkeys;
    scan->rs_bitmapscan = is_bitmapscan;
    scan->rs_strategy = NULL;   /* set in initscan */
    scan->rs_allow_strat = allow_strat;
    scan->rs_allow_sync = allow_sync;

    /*
     * we can use page-at-a-time mode if it's an MVCC-safe snapshot
     */
    scan->rs_pageatatime = IsMVCCSnapshot(snapshot);

    /* we only need to set this up once */
    scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

    /*
     * we do this here instead of in initscan() because heap_rescan also calls
     * initscan() and we don't want to allocate memory again
     */
    if (nkeys > 0)
        scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
    else
        scan->rs_key = NULL;

    initscan(scan, key, false);

    return scan;
}
/* ----------------
 *		heap_rescan		- restart a relation scan
 * ----------------
 */
void
heap_rescan(HeapScanDesc scan,
            ScanKey key)
{
    /*
     * unpin scan buffers
     */
    if (BufferIsValid(scan->rs_cbuf))
        ReleaseBuffer(scan->rs_cbuf);

    /*
     * reinitialize scan descriptor
     */
    initscan(scan, key, true);
}
/* ----------------
 *		heap_endscan	- end relation scan
 *
 *		See how to integrate with index scans.
 *		Check handling if reldesc caching.
 * ----------------
 */
void
heap_endscan(HeapScanDesc scan)
{
    /* Note: no locking manipulations needed */

    /*
     * unpin scan buffers
     */
    if (BufferIsValid(scan->rs_cbuf))
        ReleaseBuffer(scan->rs_cbuf);

    /*
     * decrement relation reference count and free scan descriptor storage
     */
    RelationDecrementReferenceCount(scan->rs_rd);

    if (scan->rs_key)
        pfree(scan->rs_key);

    if (scan->rs_strategy != NULL)
        FreeAccessStrategy(scan->rs_strategy);

    pfree(scan);
}
/* ----------------
 *		heap_getnext	- retrieve next tuple in scan
 *
 *		Fix to work with index relations.
 *		We don't return the buffer anymore, but you can get it from the
 *		returned HeapTuple.
 * ----------------
 */

#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
	elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
		 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
#define HEAPDEBUG_2 \
	elog(DEBUG2, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
	elog(DEBUG2, "heap_getnext returning tuple")
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
#define HEAPDEBUG_3
#endif   /* !defined(HEAPDEBUGALL) */


HeapTuple
heap_getnext(HeapScanDesc scan, ScanDirection direction)
{
    /* Note: no locking manipulations needed */

    HEAPDEBUG_1;                /* heap_getnext( info ) */

    if (scan->rs_pageatatime)
        heapgettup_pagemode(scan, direction,
                            scan->rs_nkeys, scan->rs_key);
    else
        heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);

    if (scan->rs_ctup.t_data == NULL)
    {
        HEAPDEBUG_2;            /* heap_getnext returning EOS */
        return NULL;
    }

    /*
     * if we get here it means we have a new current scan tuple, so point to
     * the proper return buffer and return the tuple.
     */
    HEAPDEBUG_3;                /* heap_getnext returning tuple */

    pgstat_count_heap_getnext(scan->rs_rd);

    return &(scan->rs_ctup);
}
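
/*
 * Example (not part of the original file): a minimal sequential-scan loop
 * using heap_beginscan/heap_getnext/heap_endscan.  The helper name and the
 * EXAMPLE_USAGE_SKETCH guard are hypothetical; it assumes a backend
 * transaction context, and the snapshot choice (here the active snapshot)
 * is entirely up to the caller.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static void
example_seqscan(Relation rel)
{
    HeapScanDesc scan;
    HeapTuple   tuple;

    scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points into the scan's current buffer; copy it if needed */
    }
    heap_endscan(scan);
}
#endif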
/*
 *	heap_fetch		- retrieve tuple with given tid
 *
 * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
 * the tuple, fill in the remaining fields of *tuple, and check the tuple
 * against the specified snapshot.
 *
 * If successful (tuple found and passes snapshot time qual), then *userbuf
 * is set to the buffer holding the tuple and TRUE is returned.  The caller
 * must unpin the buffer when done with the tuple.
 *
 * If the tuple is not found (ie, item number references a deleted slot),
 * then tuple->t_data is set to NULL and FALSE is returned.
 *
 * If the tuple is found but fails the time qual check, then FALSE is returned
 * but tuple->t_data is left pointing to the tuple.
 *
 * keep_buf determines what is done with the buffer in the FALSE-result cases.
 * When the caller specifies keep_buf = true, we retain the pin on the buffer
 * and return it in *userbuf (so the caller must eventually unpin it); when
 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
 *
 * stats_relation is the relation to charge the heap_fetch operation against
 * for statistical purposes.  (This could be the heap rel itself, an
 * associated index, or NULL to not count the fetch at all.)
 *
 * heap_fetch does not follow HOT chains: only the exact TID requested will
 * be fetched.
 *
 * It is somewhat inconsistent that we ereport() on invalid block number but
 * return false on invalid item number.  There are a couple of reasons though.
 * One is that the caller can relatively easily check the block number for
 * validity, but cannot check the item number without reading the page
 * himself.  Another is that when we are following a t_ctid link, we can be
 * reasonably confident that the page number is valid (since VACUUM shouldn't
 * truncate off the destination page without having killed the referencing
 * tuple first), but the item number might well not be good.
 */
bool
heap_fetch(Relation relation,
           Snapshot snapshot,
           HeapTuple tuple,
           Buffer *userbuf,
           bool keep_buf,
           Relation stats_relation)
{
    ItemPointer tid = &(tuple->t_self);
    ItemId      lp;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    bool        valid;

    /*
     * Fetch and pin the appropriate page of the relation.
     */
    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));

    /*
     * Need share lock on buffer to examine tuple commit status.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);
    page = BufferGetPage(buffer);

    /*
     * We'd better check for out-of-range offnum in case of VACUUM since the
     * TID was obtained.
     */
    offnum = ItemPointerGetOffsetNumber(tid);
    if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    {
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        if (keep_buf)
            *userbuf = buffer;
        else
        {
            ReleaseBuffer(buffer);
            *userbuf = InvalidBuffer;
        }
        tuple->t_data = NULL;
        return false;
    }

    /*
     * get the item line pointer corresponding to the requested tid
     */
    lp = PageGetItemId(page, offnum);

    /*
     * Must check for deleted tuple.
     */
    if (!ItemIdIsNormal(lp))
    {
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        if (keep_buf)
            *userbuf = buffer;
        else
        {
            ReleaseBuffer(buffer);
            *userbuf = InvalidBuffer;
        }
        tuple->t_data = NULL;
        return false;
    }

    /*
     * fill in *tuple fields
     */
    tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    tuple->t_len = ItemIdGetLength(lp);
    tuple->t_tableOid = RelationGetRelid(relation);

    /*
     * check time qualification of tuple, then release lock
     */
    valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    if (valid)
    {
        /*
         * All checks passed, so return the tuple as valid. Caller is now
         * responsible for releasing the buffer.
         */
        *userbuf = buffer;

        /* Count the successful fetch against appropriate rel, if any */
        if (stats_relation != NULL)
            pgstat_count_heap_fetch(stats_relation);

        return true;
    }

    /* Tuple failed time qual, but maybe caller wants to see it anyway. */
    if (keep_buf)
        *userbuf = buffer;
    else
    {
        ReleaseBuffer(buffer);
        *userbuf = InvalidBuffer;
    }

    return false;
}
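
/*
 * Example (not part of the original file): fetching a single tuple by TID
 * with heap_fetch(), following the buffer-handling contract described above.
 * The helper name and the EXAMPLE_USAGE_SKETCH guard are hypothetical; the
 * snapshot is supplied by the caller, and keep_buf = false means no buffer
 * is left pinned on failure.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static bool
example_fetch_by_tid(Relation rel, ItemPointer tid, Snapshot snapshot)
{
    HeapTupleData tuple;
    Buffer      buffer;

    tuple.t_self = *tid;
    if (heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
    {
        /* tuple.t_data is valid only while we hold the buffer pin */
        ReleaseBuffer(buffer);
        return true;
    }
    return false;
}
#endif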
/*
 *	heap_hot_search_buffer	- search HOT chain for tuple satisfying snapshot
 *
 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
 * of a HOT chain), and buffer is the buffer holding this tuple.  We search
 * for the first chain member satisfying the given snapshot.  If one is
 * found, we update *tid to reference that tuple's offset number, and
 * return TRUE.  If no match, return FALSE without modifying *tid.
 *
 * If all_dead is not NULL, we check non-visible tuples to see if they are
 * globally dead; *all_dead is set TRUE if all members of the HOT chain
 * are vacuumable, FALSE if not.
 *
 * Unlike heap_fetch, the caller must already have pin and (at least) share
 * lock on the buffer; it is still pinned/locked at exit.  Also unlike
 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
 */
bool
heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
                       bool *all_dead)
{
    Page        dp = (Page) BufferGetPage(buffer);
    TransactionId prev_xmax = InvalidTransactionId;
    OffsetNumber offnum;
    bool        at_chain_start;

    if (all_dead)
        *all_dead = true;

    Assert(TransactionIdIsValid(RecentGlobalXmin));

    Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
    offnum = ItemPointerGetOffsetNumber(tid);
    at_chain_start = true;

    /* Scan through possible multiple members of HOT-chain */
    for (;;)
    {
        ItemId      lp;
        HeapTupleData heapTuple;

        /* check for bogus TID */
        if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
            break;

        lp = PageGetItemId(dp, offnum);

        /* check for unused, dead, or redirected items */
        if (!ItemIdIsNormal(lp))
        {
            /* We should only see a redirect at start of chain */
            if (ItemIdIsRedirected(lp) && at_chain_start)
            {
                /* Follow the redirect */
                offnum = ItemIdGetRedirect(lp);
                at_chain_start = false;
                continue;
            }
            /* else must be end of chain */
            break;
        }

        heapTuple.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
        heapTuple.t_len = ItemIdGetLength(lp);

        /*
         * Shouldn't see a HEAP_ONLY tuple at chain start.
         */
        if (at_chain_start && HeapTupleIsHeapOnly(&heapTuple))
            break;

        /*
         * The xmin should match the previous xmax value, else chain is
         * broken.
         */
        if (TransactionIdIsValid(prev_xmax) &&
            !TransactionIdEquals(prev_xmax,
                                 HeapTupleHeaderGetXmin(heapTuple.t_data)))
            break;

        /* If it's visible per the snapshot, we must return it */
        if (HeapTupleSatisfiesVisibility(&heapTuple, snapshot, buffer))
        {
            ItemPointerSetOffsetNumber(tid, offnum);
            if (all_dead)
                *all_dead = false;
            return true;
        }

        /*
         * If we can't see it, maybe no one else can either.  At caller
         * request, check whether all chain members are dead to all
         * transactions.
         */
        if (all_dead && *all_dead &&
            HeapTupleSatisfiesVacuum(heapTuple.t_data, RecentGlobalXmin,
                                     buffer) != HEAPTUPLE_DEAD)
            *all_dead = false;

        /*
         * Check to see if HOT chain continues past this tuple; if so fetch
         * the next offnum and loop around.
         */
        if (HeapTupleIsHotUpdated(&heapTuple))
        {
            Assert(ItemPointerGetBlockNumber(&heapTuple.t_data->t_ctid) ==
                   ItemPointerGetBlockNumber(tid));
            offnum = ItemPointerGetOffsetNumber(&heapTuple.t_data->t_ctid);
            at_chain_start = false;
            prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
        }
        else
            break;              /* end of chain */
    }

    return false;
}
/*
 *	heap_hot_search		- search HOT chain for tuple satisfying snapshot
 *
 * This has the same API as heap_hot_search_buffer, except that the caller
 * does not provide the buffer containing the page, rather we access it
 * locally.
 */
bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
                bool *all_dead)
{
    bool        result;
    Buffer      buffer;

    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    LockBuffer(buffer, BUFFER_LOCK_SHARE);
    result = heap_hot_search_buffer(tid, buffer, snapshot, all_dead);
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    ReleaseBuffer(buffer);
    return result;
}
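
/*
 * Example (not part of the original file): how an index-lookup path might use
 * heap_hot_search() to test whether any member of a HOT chain is visible.
 * The helper name and the EXAMPLE_USAGE_SKETCH guard are hypothetical; the
 * all_dead flag could then feed LP_DEAD hinting in the index, which is only
 * sketched here.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static bool
example_probe_hot_chain(Relation heapRel, ItemPointer tid, Snapshot snapshot)
{
    bool        all_dead = false;
    bool        found;

    found = heap_hot_search(tid, heapRel, snapshot, &all_dead);
    if (!found && all_dead)
    {
        /* every chain member is vacuumable; an index AM could mark it dead */
    }
    return found;
}
#endif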
/*
 *	heap_get_latest_tid -  get the latest tid of a specified tuple
 *
 * Actually, this gets the latest version that is visible according to
 * the passed snapshot.  You can pass SnapshotDirty to get the very latest,
 * possibly uncommitted version.
 *
 * *tid is both an input and an output parameter: it is updated to
 * show the latest version of the row.  Note that it will not be changed
 * if no version of the row passes the snapshot test.
 */
void
heap_get_latest_tid(Relation relation,
                    Snapshot snapshot,
                    ItemPointer tid)
{
    BlockNumber blk;
    ItemPointerData ctid;
    TransactionId priorXmax;

    /* this is to avoid Assert failures on bad input */
    if (!ItemPointerIsValid(tid))
        return;

    /*
     * Since this can be called with user-supplied TID, don't trust the input
     * too much.  (RelationGetNumberOfBlocks is an expensive check, so we
     * don't check t_ctid links again this way.  Note that it would not do to
     * call it just once and save the result, either.)
     */
    blk = ItemPointerGetBlockNumber(tid);
    if (blk >= RelationGetNumberOfBlocks(relation))
        elog(ERROR, "block number %u is out of range for relation \"%s\"",
             blk, RelationGetRelationName(relation));

    /*
     * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
     * need to examine, and *tid is the TID we will return if ctid turns out
     * to be bogus.
     *
     * Note that we will loop until we reach the end of the t_ctid chain.
     * Depending on the snapshot passed, there might be at most one visible
     * version of the row, but we don't try to optimize for that.
     */
    ctid = *tid;
    priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    for (;;)
    {
        Buffer      buffer;
        Page        page;
        OffsetNumber offnum;
        ItemId      lp;
        HeapTupleData tp;
        bool        valid;

        /*
         * Read, pin, and lock the page.
         */
        buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
        LockBuffer(buffer, BUFFER_LOCK_SHARE);
        page = BufferGetPage(buffer);

        /*
         * Check for bogus item number.  This is not treated as an error
         * condition because it can happen while following a t_ctid link. We
         * just assume that the prior tid is OK and return it unchanged.
         */
        offnum = ItemPointerGetOffsetNumber(&ctid);
        if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }
        lp = PageGetItemId(page, offnum);
        if (!ItemIdIsNormal(lp))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }

        /* OK to access the tuple */
        tp.t_self = ctid;
        tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
        tp.t_len = ItemIdGetLength(lp);

        /*
         * After following a t_ctid link, we might arrive at an unrelated
         * tuple.  Check for XMIN match.
         */
        if (TransactionIdIsValid(priorXmax) &&
            !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }

        /*
         * Check time qualification of tuple; if visible, set it as the new
         * result candidate.
         */
        valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
        if (valid)
            *tid = ctid;

        /*
         * If there's a valid t_ctid link, follow it, else we're done.
         */
        if ((tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) ||
            ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }

        ctid = tp.t_data->t_ctid;
        priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
        UnlockReleaseBuffer(buffer);
    }
}
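
/*
 * Example (not part of the original file): chasing a row to its latest
 * version with heap_get_latest_tid().  The helper name and the
 * EXAMPLE_USAGE_SKETCH guard are hypothetical; the snapshot (for instance
 * SnapshotDirty, per the comment above) is chosen by the caller.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static ItemPointerData
example_latest_tid(Relation rel, ItemPointerData tid, Snapshot snapshot)
{
    /* tid is updated in place only if a visible newer version is found */
    heap_get_latest_tid(rel, snapshot, &tid);
    return tid;
}
#endif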
/*
 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
 *
 * This is called after we have waited for the XMAX transaction to terminate.
 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
 * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
 * hint bit if possible --- but beware that that may not yet be possible,
 * if the transaction committed asynchronously.  Hence callers should look
 * only at XMAX_INVALID.
 */
static void
UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
{
    Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));

    if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
    {
        if (TransactionIdDidCommit(xid))
            HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
                                 xid);
        else
            HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
                                 InvalidTransactionId);
    }
}
/*
 * GetBulkInsertState - prepare status object for a bulk insert
 */
BulkInsertState
GetBulkInsertState(void)
{
    BulkInsertState bistate;

    bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
    bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
    bistate->current_buf = InvalidBuffer;
    return bistate;
}

/*
 * FreeBulkInsertState - clean up after finishing a bulk insert
 */
void
FreeBulkInsertState(BulkInsertState bistate)
{
    if (bistate->current_buf != InvalidBuffer)
        ReleaseBuffer(bistate->current_buf);
    FreeAccessStrategy(bistate->strategy);
    pfree(bistate);
}
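
/*
 * Example (not part of the original file): the intended pairing of
 * GetBulkInsertState()/FreeBulkInsertState() around a batch of heap_insert()
 * calls.  The helper name, the tuple array, and the EXAMPLE_USAGE_SKETCH
 * guard are hypothetical; options are left at 0 here, but callers may pass
 * the HEAP_INSERT_* flags described below.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static void
example_bulk_insert(Relation rel, HeapTuple *tuples, int ntuples)
{
    BulkInsertState bistate = GetBulkInsertState();
    CommandId   cid = GetCurrentCommandId(true);
    int         i;

    for (i = 0; i < ntuples; i++)
        heap_insert(rel, tuples[i], cid, 0, bistate);

    FreeBulkInsertState(bistate);
}
#endif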
/*
 *	heap_insert		- insert tuple into a heap
 *
 * The new tuple is stamped with current transaction ID and the specified
 * command ID.
 *
 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
 * logged in WAL, even for a non-temp relation.  Safe usage of this behavior
 * requires that we arrange that all new tuples go into new pages not
 * containing any tuples from other transactions, and that the relation gets
 * fsync'd before commit.  (See also heap_sync() comments)
 *
 * The HEAP_INSERT_SKIP_FSM option is passed directly to
 * RelationGetBufferForTuple, which see for more info.
 *
 * Note that these options will be applied when inserting into the heap's
 * TOAST table, too, if the tuple requires any out-of-line data.
 *
 * The BulkInsertState object (if any; bistate can be NULL for default
 * behavior) is also just passed through to RelationGetBufferForTuple.
 *
 * The return value is the OID assigned to the tuple (either here or by the
 * caller), or InvalidOid if no OID.  The header fields of *tup are updated
 * to match the stored tuple; in particular tup->t_self receives the actual
 * TID where the tuple was stored.  But note that any toasting of fields
 * within the tuple data is NOT reflected into *tup.
 */
Oid
heap_insert(Relation relation, HeapTuple tup, CommandId cid,
            int options, BulkInsertState bistate)
{
    TransactionId xid = GetCurrentTransactionId();
    HeapTuple   heaptup;
    Buffer      buffer;
    bool        all_visible_cleared = false;

    if (relation->rd_rel->relhasoids)
    {
#ifdef NOT_USED
        /* this is redundant with an Assert in HeapTupleSetOid */
        Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif

        /*
         * If the object id of this tuple has already been assigned, trust the
         * caller.  There are a couple of ways this can happen.  At initial db
         * creation, the backend program sets oids for tuples.  When we define
         * an index, we set the oid.  Finally, in the future, we may allow
         * users to set their own object ids in order to support a persistent
         * object store (objects need to contain pointers to one another).
         */
        if (!OidIsValid(HeapTupleGetOid(tup)))
            HeapTupleSetOid(tup, GetNewOid(relation));
    }
    else
    {
        /* check there is not space for an OID */
        Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
    }

    tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
    HeapTupleHeaderSetXmin(tup->t_data, xid);
    HeapTupleHeaderSetCmin(tup->t_data, cid);
    HeapTupleHeaderSetXmax(tup->t_data, 0);     /* for cleanliness */
    tup->t_tableOid = RelationGetRelid(relation);

    /*
     * If the new tuple is too big for storage or contains already toasted
     * out-of-line attributes from some other relation, invoke the toaster.
     *
     * Note: below this point, heaptup is the data we actually intend to store
     * into the relation; tup is the caller's original untoasted data.
     */
    if (relation->rd_rel->relkind != RELKIND_RELATION)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(tup));
        heaptup = tup;
    }
    else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
        heaptup = toast_insert_or_update(relation, tup, NULL, options);
    else
        heaptup = tup;

    /* Find buffer to insert this tuple into */
    buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
                                       InvalidBuffer, options, bistate);

    /* NO EREPORT(ERROR) from here till changes are logged */
    START_CRIT_SECTION();

    RelationPutHeapTuple(relation, buffer, heaptup);

    if (PageIsAllVisible(BufferGetPage(buffer)))
    {
        all_visible_cleared = true;
        PageClearAllVisible(BufferGetPage(buffer));
    }

    /*
     * XXX Should we set PageSetPrunable on this page ?
     *
     * The inserting transaction may eventually abort thus making this tuple
     * DEAD and hence available for pruning. Though we don't want to optimize
     * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
     * aborted tuple will never be pruned until next vacuum is triggered.
     *
     * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
     */

    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (!(options & HEAP_INSERT_SKIP_WAL) && !relation->rd_istemp)
    {
        xl_heap_insert xlrec;
        xl_heap_header xlhdr;
        XLogRecPtr  recptr;
        XLogRecData rdata[3];
        Page        page = BufferGetPage(buffer);
        uint8       info = XLOG_HEAP_INSERT;

        xlrec.all_visible_cleared = all_visible_cleared;
        xlrec.target.node = relation->rd_node;
        xlrec.target.tid = heaptup->t_self;
        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfHeapInsert;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
        xlhdr.t_infomask = heaptup->t_data->t_infomask;
        xlhdr.t_hoff = heaptup->t_data->t_hoff;

        /*
         * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
         * to write the whole page to the xlog, we don't need to store
         * xl_heap_header in the xlog.
         */
        rdata[1].data = (char *) &xlhdr;
        rdata[1].len = SizeOfHeapHeader;
        rdata[1].buffer = buffer;
        rdata[1].buffer_std = true;
        rdata[1].next = &(rdata[2]);

        /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
        rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
        rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
        rdata[2].buffer = buffer;
        rdata[2].buffer_std = true;
        rdata[2].next = NULL;

        /*
         * If this is the single and first tuple on page, we can reinit the
         * page instead of restoring the whole thing.  Set flag, and hide
         * buffer references from XLogInsert.
         */
        if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
            PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
        {
            info |= XLOG_HEAP_INIT_PAGE;
            rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
        }

        recptr = XLogInsert(RM_HEAP_ID, info, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    UnlockReleaseBuffer(buffer);

    /* Clear the bit in the visibility map if necessary */
    if (all_visible_cleared)
        visibilitymap_clear(relation,
                            ItemPointerGetBlockNumber(&(heaptup->t_self)));

    /*
     * If tuple is cachable, mark it for invalidation from the caches in case
     * we abort.  Note it is OK to do this after releasing the buffer, because
     * the heaptup data structure is all in local memory, not in the shared
     * buffer.
     */
    CacheInvalidateHeapTuple(relation, heaptup);

    pgstat_count_heap_insert(relation);

    /*
     * If heaptup is a private copy, release it.  Don't forget to copy t_self
     * back to the caller's image, too.
     */
    if (heaptup != tup)
    {
        tup->t_self = heaptup->t_self;
        heap_freetuple(heaptup);
    }

    return HeapTupleGetOid(tup);
}
/*
 *	simple_heap_insert - insert a tuple
 *
 * Currently, this routine differs from heap_insert only in supplying
 * a default command ID and not allowing access to the speedup options.
 *
 * This should be used rather than using heap_insert directly in most places
 * where we are modifying system catalogs.
 */
Oid
simple_heap_insert(Relation relation, HeapTuple tup)
{
    return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
}
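
/*
 * Example (not part of the original file): building a tuple with
 * heap_form_tuple() and storing it via simple_heap_insert(), the pattern used
 * for most catalog modifications.  The helper name, the values/nulls arrays,
 * and the EXAMPLE_USAGE_SKETCH guard are hypothetical; the arrays must match
 * the relation's tuple descriptor, and catalog callers normally also update
 * the catalog's indexes afterwards.
 */
#ifdef EXAMPLE_USAGE_SKETCH
static Oid
example_simple_insert(Relation rel, Datum *values, bool *nulls)
{
    HeapTuple   tup;
    Oid         oid;

    tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
    oid = simple_heap_insert(rel, tup);
    heap_freetuple(tup);
    return oid;
}
#endif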
/*
 *	heap_delete - delete a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_delete instead.
 *
 *	relation - table to be modified (caller must hold suitable lock)
 *	tid - TID of tuple to be deleted
 *	ctid - output parameter, used only for failure case (see below)
 *	update_xmax - output parameter, used only for failure case (see below)
 *	cid - delete command ID (used for visibility test, and stored into
 *		cmax if successful)
 *	crosscheck - if not InvalidSnapshot, also check tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we did delete it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as tid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
HTSU_Result
heap_delete(Relation relation, ItemPointer tid,
            ItemPointer ctid, TransactionId *update_xmax,
            CommandId cid, Snapshot crosscheck, bool wait)
{
    HTSU_Result result;
    TransactionId xid = GetCurrentTransactionId();
    ItemId      lp;
    HeapTupleData tp;
    Page        page;
    Buffer      buffer;
    bool        have_tuple_lock = false;
    bool        iscombo;
    bool        all_visible_cleared = false;

    Assert(ItemPointerIsValid(tid));

    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    page = BufferGetPage(buffer);
    lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    Assert(ItemIdIsNormal(lp));

    tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    tp.t_len = ItemIdGetLength(lp);
    tp.t_self = *tid;

l1:
    result = HeapTupleSatisfiesUpdate(tp.t_data, cid, buffer);

    if (result == HeapTupleInvisible)
    {
        UnlockReleaseBuffer(buffer);
        elog(ERROR, "attempted to delete invisible tuple");
    }
    else if (result == HeapTupleBeingUpdated && wait)
    {
        TransactionId xwait;
        uint16      infomask;

        /* must copy state data before unlocking buffer */
        xwait = HeapTupleHeaderGetXmax(tp.t_data);
        infomask = tp.t_data->t_infomask;

        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /*
         * Acquire tuple lock to establish our priority for the tuple (see
         * heap_lock_tuple).  LockTuple will release us when we are
         * next-in-line for the tuple.
         *
         * If we are forced to "start over" below, we keep the tuple lock;
         * this arranges that we stay at the head of the line while rechecking
         * tuple state.
         */
        if (!have_tuple_lock)
        {
            LockTuple(relation, &(tp.t_self), ExclusiveLock);
            have_tuple_lock = true;
        }

        /*
         * Sleep until concurrent transaction ends.  Note that we don't care
         * if the locker has an exclusive or shared lock, because we need
         * exclusive.
         */

        if (infomask & HEAP_XMAX_IS_MULTI)
        {
            /* wait for multixact */
            MultiXactIdWait((MultiXactId) xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * If xwait had just locked the tuple then some other xact could
             * update this tuple before we get to this point.  Check for xmax
             * change, and start over if so.
             */
            if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
                                     xwait))
                goto l1;

            /*
             * You might think the multixact is necessarily done here, but not
             * so: it could have surviving members, namely our own xact or
             * other subxacts of this backend.  It is legal for us to delete
             * the tuple in either case, however (the latter case is
             * essentially a situation of upgrading our former shared lock to
             * exclusive).  We don't bother changing the on-disk hint bits
             * since we are about to overwrite the xmax altogether.
             */
        }
        else
        {
            /* wait for regular transaction to end */
            XactLockTableWait(xwait);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

            /*
             * xwait is done, but if xwait had just locked the tuple then some
             * other xact could update this tuple before we get to this point.
             * Check for xmax change, and start over if so.
             */
            if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
                !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
                                     xwait))
                goto l1;

            /* Otherwise check if it committed or aborted */
            UpdateXmaxHintBits(tp.t_data, buffer, xwait);
        }

        /*
         * We may overwrite if previous xmax aborted, or if it committed but
         * only locked the tuple without updating it.
         */
        if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
                                     HEAP_IS_LOCKED))
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }

    if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
    {
        /* Perform additional check for serializable RI updates */
        if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
            result = HeapTupleUpdated;
    }

    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated ||
               result == HeapTupleUpdated ||
               result == HeapTupleBeingUpdated);
        Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
        *ctid = tp.t_data->t_ctid;
        *update_xmax = HeapTupleHeaderGetXmax(tp.t_data);
        UnlockReleaseBuffer(buffer);
        if (have_tuple_lock)
            UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
        return result;
    }

    /* replace cid with a combo cid if necessary */
    HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);

    START_CRIT_SECTION();

    /*
     * If this transaction commits, the tuple will become DEAD sooner or
     * later.  Set flag that this page is a candidate for pruning once our xid
     * falls below the OldestXmin horizon.  If the transaction finally aborts,
     * the subsequent page pruning will be a no-op and the hint will be
     * cleared.
     */
    PageSetPrunable(page, xid);

    if (PageIsAllVisible(page))
    {
        all_visible_cleared = true;
        PageClearAllVisible(page);
    }

    /* store transaction information of xact deleting the tuple */
    tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                               HEAP_XMAX_INVALID |
                               HEAP_XMAX_IS_MULTI |
                               HEAP_IS_LOCKED |
                               HEAP_MOVED);
    HeapTupleHeaderClearHotUpdated(tp.t_data);
    HeapTupleHeaderSetXmax(tp.t_data, xid);
    HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    /* Make sure there is no forward chain link in t_ctid */
    tp.t_data->t_ctid = tp.t_self;

    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (!relation->rd_istemp)
    {
        xl_heap_delete xlrec;
        XLogRecPtr  recptr;
        XLogRecData rdata[2];

        xlrec.all_visible_cleared = all_visible_cleared;
        xlrec.target.node = relation->rd_node;
        xlrec.target.tid = tp.t_self;
        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfHeapDelete;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        rdata[1].data = NULL;
        rdata[1].len = 0;
        rdata[1].buffer = buffer;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    /*
     * If the tuple has toasted out-of-line attributes, we need to delete
     * those items too.  We have to do this before releasing the buffer
     * because we need to look at the contents of the tuple, but it's OK to
     * release the content lock on the buffer first.
     */
    if (relation->rd_rel->relkind != RELKIND_RELATION)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(&tp));
    }
    else if (HeapTupleHasExternal(&tp))
        toast_delete(relation, &tp);

    /*
     * Mark tuple for invalidation from system caches at next command
     * boundary.  We have to do this before releasing the buffer because we
     * need to look at the contents of the tuple.
     */
    CacheInvalidateHeapTuple(relation, &tp);

    /* Clear the bit in the visibility map if necessary */
    if (all_visible_cleared)
        visibilitymap_clear(relation, BufferGetBlockNumber(buffer));

    /* Now we can release the buffer */
    ReleaseBuffer(buffer);

    /*
     * Release the lmgr tuple lock, if we had it.
     */
    if (have_tuple_lock)
        UnlockTuple(relation, &(tp.t_self), ExclusiveLock);

    pgstat_count_heap_delete(relation);

    return HeapTupleMayBeUpdated;
}
/*
 *	simple_heap_delete - delete a tuple
 *
 * This routine may be used to delete a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	result = heap_delete(relation, tid,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			break;
	}
}
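/*
 * Illustrative usage sketch (not part of this file): catalog-maintenance
 * code that already holds a suitable lock on the relation deletes a tuple
 * it has located.  "pg_foo_relid" and "tup" are placeholder names for this
 * example only.
 *
 *		Relation	rel = heap_open(pg_foo_relid, RowExclusiveLock);
 *
 *		... locate HeapTuple tup to remove ...
 *
 *		simple_heap_delete(rel, &tup->t_self);
 *
 *		heap_close(rel, RowExclusiveLock);
 *
 * Any unexpected concurrent update of the target tuple is raised as an
 * error by the switch above rather than returned to the caller.
 */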
/*
 *	heap_update - replace a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_update instead.
 *
 *	relation - table to be modified (caller must hold suitable lock)
 *	otid - TID of old tuple to be replaced
 *	newtup - newly constructed tuple data to store
 *	ctid - output parameter, used only for failure case (see below)
 *	update_xmax - output parameter, used only for failure case (see below)
 *	cid - update command ID (used for visibility test, and stored into
 *		cmax/cmin if successful)
 *	crosscheck - if not InvalidSnapshot, also check old tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we *did* update it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * On success, the header fields of *newtup are updated to match the new
 * stored tuple; in particular, newtup->t_self is set to the TID where the
 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
 * update was done.  However, any TOAST changes in the new tuple's
 * data are not reflected into *newtup.
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as otid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
HTSU_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
			ItemPointer ctid, TransactionId *update_xmax,
			CommandId cid, Snapshot crosscheck, bool wait)
{
2374 TransactionId xid
= GetCurrentTransactionId();
2375 Bitmapset
*hot_attrs
;
2377 HeapTupleData oldtup
;
2386 bool have_tuple_lock
= false;
2388 bool use_hot_update
= false;
2389 bool all_visible_cleared
= false;
2390 bool all_visible_cleared_new
= false;
2392 Assert(ItemPointerIsValid(otid
));
2395 * Fetch the list of attributes to be checked for HOT update. This is
2396 * wasted effort if we fail to update or have to put the new tuple on a
2397 * different page. But we must compute the list before obtaining buffer
2398 * lock --- in the worst case, if we are doing an update on one of the
2399 * relevant system catalogs, we could deadlock if we try to fetch the list
2400 * later. In any case, the relcache caches the data so this is usually
2403 * Note that we get a copy here, so we need not worry about relcache flush
2404 * happening midway through.
2406 hot_attrs
= RelationGetIndexAttrBitmap(relation
);
2408 buffer
= ReadBuffer(relation
, ItemPointerGetBlockNumber(otid
));
2409 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2411 page
= BufferGetPage(buffer
);
2412 lp
= PageGetItemId(page
, ItemPointerGetOffsetNumber(otid
));
2413 Assert(ItemIdIsNormal(lp
));
2415 oldtup
.t_data
= (HeapTupleHeader
) PageGetItem(page
, lp
);
2416 oldtup
.t_len
= ItemIdGetLength(lp
);
2417 oldtup
.t_self
= *otid
;
2420 * Note: beyond this point, use oldtup not otid to refer to old tuple.
2421 * otid may very well point at newtup->t_self, which we will overwrite
2422 * with the new tuple's location, so there's great risk of confusion if we
2427 result
= HeapTupleSatisfiesUpdate(oldtup
.t_data
, cid
, buffer
);
2429 if (result
== HeapTupleInvisible
)
2431 UnlockReleaseBuffer(buffer
);
2432 elog(ERROR
, "attempted to update invisible tuple");
2434 else if (result
== HeapTupleBeingUpdated
&& wait
)
2436 TransactionId xwait
;
2439 /* must copy state data before unlocking buffer */
2440 xwait
= HeapTupleHeaderGetXmax(oldtup
.t_data
);
2441 infomask
= oldtup
.t_data
->t_infomask
;
2443 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2446 * Acquire tuple lock to establish our priority for the tuple (see
2447 * heap_lock_tuple). LockTuple will release us when we are
2448 * next-in-line for the tuple.
2450 * If we are forced to "start over" below, we keep the tuple lock;
2451 * this arranges that we stay at the head of the line while rechecking
2454 if (!have_tuple_lock
)
2456 LockTuple(relation
, &(oldtup
.t_self
), ExclusiveLock
);
2457 have_tuple_lock
= true;
2461 * Sleep until concurrent transaction ends. Note that we don't care
2462 * if the locker has an exclusive or shared lock, because we need
2466 if (infomask
& HEAP_XMAX_IS_MULTI
)
2468 /* wait for multixact */
2469 MultiXactIdWait((MultiXactId
) xwait
);
2470 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2473 * If xwait had just locked the tuple then some other xact could
2474 * update this tuple before we get to this point. Check for xmax
2475 * change, and start over if so.
2477 if (!(oldtup
.t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
2478 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup
.t_data
),
2483 * You might think the multixact is necessarily done here, but not
2484 * so: it could have surviving members, namely our own xact or
2485 * other subxacts of this backend. It is legal for us to update
2486 * the tuple in either case, however (the latter case is
2487 * essentially a situation of upgrading our former shared lock to
2488 * exclusive). We don't bother changing the on-disk hint bits
2489 * since we are about to overwrite the xmax altogether.
2494 /* wait for regular transaction to end */
2495 XactLockTableWait(xwait
);
2496 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2499 * xwait is done, but if xwait had just locked the tuple then some
2500 * other xact could update this tuple before we get to this point.
2501 * Check for xmax change, and start over if so.
2503 if ((oldtup
.t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
2504 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup
.t_data
),
2508 /* Otherwise check if it committed or aborted */
2509 UpdateXmaxHintBits(oldtup
.t_data
, buffer
, xwait
);
2513 * We may overwrite if previous xmax aborted, or if it committed but
2514 * only locked the tuple without updating it.
2516 if (oldtup
.t_data
->t_infomask
& (HEAP_XMAX_INVALID
|
2518 result
= HeapTupleMayBeUpdated
;
2520 result
= HeapTupleUpdated
;
2523 if (crosscheck
!= InvalidSnapshot
&& result
== HeapTupleMayBeUpdated
)
2525 /* Perform additional check for serializable RI updates */
2526 if (!HeapTupleSatisfiesVisibility(&oldtup
, crosscheck
, buffer
))
2527 result
= HeapTupleUpdated
;
2530 if (result
!= HeapTupleMayBeUpdated
)
2532 Assert(result
== HeapTupleSelfUpdated
||
2533 result
== HeapTupleUpdated
||
2534 result
== HeapTupleBeingUpdated
);
2535 Assert(!(oldtup
.t_data
->t_infomask
& HEAP_XMAX_INVALID
));
2536 *ctid
= oldtup
.t_data
->t_ctid
;
2537 *update_xmax
= HeapTupleHeaderGetXmax(oldtup
.t_data
);
2538 UnlockReleaseBuffer(buffer
);
2539 if (have_tuple_lock
)
2540 UnlockTuple(relation
, &(oldtup
.t_self
), ExclusiveLock
);
2541 bms_free(hot_attrs
);
2545 /* Fill in OID and transaction status data for newtup */
2546 if (relation
->rd_rel
->relhasoids
)
2549 /* this is redundant with an Assert in HeapTupleSetOid */
2550 Assert(newtup
->t_data
->t_infomask
& HEAP_HASOID
);
2552 HeapTupleSetOid(newtup
, HeapTupleGetOid(&oldtup
));
2556 /* check there is not space for an OID */
2557 Assert(!(newtup
->t_data
->t_infomask
& HEAP_HASOID
));
2560 newtup
->t_data
->t_infomask
&= ~(HEAP_XACT_MASK
);
2561 newtup
->t_data
->t_infomask2
&= ~(HEAP2_XACT_MASK
);
2562 newtup
->t_data
->t_infomask
|= (HEAP_XMAX_INVALID
| HEAP_UPDATED
);
2563 HeapTupleHeaderSetXmin(newtup
->t_data
, xid
);
2564 HeapTupleHeaderSetCmin(newtup
->t_data
, cid
);
2565 HeapTupleHeaderSetXmax(newtup
->t_data
, 0); /* for cleanliness */
2566 newtup
->t_tableOid
= RelationGetRelid(relation
);
2569 * Replace cid with a combo cid if necessary. Note that we already put
2570 * the plain cid into the new tuple.
2572 HeapTupleHeaderAdjustCmax(oldtup
.t_data
, &cid
, &iscombo
);
2575 * If the toaster needs to be activated, OR if the new tuple will not fit
2576 * on the same page as the old, then we need to release the content lock
2577 * (but not the pin!) on the old tuple's buffer while we are off doing
2578 * TOAST and/or table-file-extension work. We must mark the old tuple to
2579 * show that it's already being updated, else other processes may try to
2580 * update it themselves.
2582 * We need to invoke the toaster if there are already any out-of-line
2583 * toasted values present, or if the new tuple is over-threshold.
2585 if (relation
->rd_rel
->relkind
!= RELKIND_RELATION
)
2587 /* toast table entries should never be recursively toasted */
2588 Assert(!HeapTupleHasExternal(&oldtup
));
2589 Assert(!HeapTupleHasExternal(newtup
));
2593 need_toast
= (HeapTupleHasExternal(&oldtup
) ||
2594 HeapTupleHasExternal(newtup
) ||
2595 newtup
->t_len
> TOAST_TUPLE_THRESHOLD
);
2597 pagefree
= PageGetHeapFreeSpace(page
);
2599 newtupsize
= MAXALIGN(newtup
->t_len
);
2601 if (need_toast
|| newtupsize
> pagefree
)
2603 /* Clear obsolete visibility flags ... */
2604 oldtup
.t_data
->t_infomask
&= ~(HEAP_XMAX_COMMITTED
|
2606 HEAP_XMAX_IS_MULTI
|
2609 HeapTupleClearHotUpdated(&oldtup
);
2610 /* ... and store info about transaction updating this tuple */
2611 HeapTupleHeaderSetXmax(oldtup
.t_data
, xid
);
2612 HeapTupleHeaderSetCmax(oldtup
.t_data
, cid
, iscombo
);
2613 /* temporarily make it look not-updated */
2614 oldtup
.t_data
->t_ctid
= oldtup
.t_self
;
2615 already_marked
= true;
2616 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2619 * Let the toaster do its thing, if needed.
2621 * Note: below this point, heaptup is the data we actually intend to
2622 * store into the relation; newtup is the caller's original untoasted
2627 /* Note we always use WAL and FSM during updates */
2628 heaptup
= toast_insert_or_update(relation
, newtup
, &oldtup
, 0);
2629 newtupsize
= MAXALIGN(heaptup
->t_len
);
2635 * Now, do we need a new page for the tuple, or not? This is a bit
2636 * tricky since someone else could have added tuples to the page while
2637 * we weren't looking. We have to recheck the available space after
2638 * reacquiring the buffer lock. But don't bother to do that if the
2639 * former amount of free space is still not enough; it's unlikely
2640 * there's more free now than before.
2642 * What's more, if we need to get a new page, we will need to acquire
2643 * buffer locks on both old and new pages. To avoid deadlock against
2644 * some other backend trying to get the same two locks in the other
2645 * order, we must be consistent about the order we get the locks in.
2646 * We use the rule "lock the lower-numbered page of the relation
2647 * first". To implement this, we must do RelationGetBufferForTuple
2648 * while not holding the lock on the old page, and we must rely on it
2649 * to get the locks on both pages in the correct order.
2651 if (newtupsize
> pagefree
)
2653 /* Assume there's no chance to put heaptup on same page. */
2654 newbuf
= RelationGetBufferForTuple(relation
, heaptup
->t_len
,
2659 /* Re-acquire the lock on the old tuple's page. */
2660 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2661 /* Re-check using the up-to-date free space */
2662 pagefree
= PageGetHeapFreeSpace(page
);
2663 if (newtupsize
> pagefree
)
2666 * Rats, it doesn't fit anymore. We must now unlock and
2667 * relock to avoid deadlock. Fortunately, this path should
2670 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2671 newbuf
= RelationGetBufferForTuple(relation
, heaptup
->t_len
,
2676 /* OK, it fits here, so we're done. */
2683 /* No TOAST work needed, and it'll fit on same page */
2684 already_marked
= false;
2690 * At this point newbuf and buffer are both pinned and locked, and newbuf
2691 * has enough space for the new tuple. If they are the same buffer, only
2695 if (newbuf
== buffer
)
2698 * Since the new tuple is going into the same page, we might be able
2699 * to do a HOT update. Check if any of the index columns have been
2700 * changed. If not, then HOT update is possible.
2702 if (HeapSatisfiesHOTUpdate(relation
, hot_attrs
, &oldtup
, heaptup
))
2703 use_hot_update
= true;
2707 /* Set a hint that the old page could use prune/defrag */
2711 /* NO EREPORT(ERROR) from here till changes are logged */
2712 START_CRIT_SECTION();
2715 * If this transaction commits, the old tuple will become DEAD sooner or
2716 * later. Set flag that this page is a candidate for pruning once our xid
2717 * falls below the OldestXmin horizon. If the transaction finally aborts,
2718 * the subsequent page pruning will be a no-op and the hint will be
2721 * XXX Should we set hint on newbuf as well? If the transaction aborts,
2722 * there would be a prunable tuple in the newbuf; but for now we choose
2723 * not to optimize for aborts. Note that heap_xlog_update must be kept in
2724 * sync if this decision changes.
2726 PageSetPrunable(page
, xid
);
2730 /* Mark the old tuple as HOT-updated */
2731 HeapTupleSetHotUpdated(&oldtup
);
2732 /* And mark the new tuple as heap-only */
2733 HeapTupleSetHeapOnly(heaptup
);
2734 /* Mark the caller's copy too, in case different from heaptup */
2735 HeapTupleSetHeapOnly(newtup
);
2739 /* Make sure tuples are correctly marked as not-HOT */
2740 HeapTupleClearHotUpdated(&oldtup
);
2741 HeapTupleClearHeapOnly(heaptup
);
2742 HeapTupleClearHeapOnly(newtup
);
2745 RelationPutHeapTuple(relation
, newbuf
, heaptup
); /* insert new tuple */
2747 if (!already_marked
)
2749 /* Clear obsolete visibility flags ... */
2750 oldtup
.t_data
->t_infomask
&= ~(HEAP_XMAX_COMMITTED
|
2752 HEAP_XMAX_IS_MULTI
|
2755 /* ... and store info about transaction updating this tuple */
2756 HeapTupleHeaderSetXmax(oldtup
.t_data
, xid
);
2757 HeapTupleHeaderSetCmax(oldtup
.t_data
, cid
, iscombo
);
2760 /* record address of new tuple in t_ctid of old one */
2761 oldtup
.t_data
->t_ctid
= heaptup
->t_self
;
2763 if (newbuf
!= buffer
)
2764 MarkBufferDirty(newbuf
);
2765 MarkBufferDirty(buffer
);
2768 * Note: we mustn't clear PD_ALL_VISIBLE flags before writing the WAL
2769 * record, because log_heap_update looks at those flags to set the
2770 * corresponding flags in the WAL record.
2774 if (!relation
->rd_istemp
)
2776 XLogRecPtr recptr
= log_heap_update(relation
, buffer
, oldtup
.t_self
,
2777 newbuf
, heaptup
, false);
2779 if (newbuf
!= buffer
)
2781 PageSetLSN(BufferGetPage(newbuf
), recptr
);
2782 PageSetTLI(BufferGetPage(newbuf
), ThisTimeLineID
);
2784 PageSetLSN(BufferGetPage(buffer
), recptr
);
2785 PageSetTLI(BufferGetPage(buffer
), ThisTimeLineID
);
2788 /* Clear PD_ALL_VISIBLE flags */
2789 if (PageIsAllVisible(BufferGetPage(buffer
)))
2791 all_visible_cleared
= true;
2792 PageClearAllVisible(BufferGetPage(buffer
));
2794 if (newbuf
!= buffer
&& PageIsAllVisible(BufferGetPage(newbuf
)))
2796 all_visible_cleared_new
= true;
2797 PageClearAllVisible(BufferGetPage(newbuf
));
2802 if (newbuf
!= buffer
)
2803 LockBuffer(newbuf
, BUFFER_LOCK_UNLOCK
);
2804 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2807 * Mark old tuple for invalidation from system caches at next command
2808 * boundary. We have to do this before releasing the buffer because we
2809 * need to look at the contents of the tuple.
2811 CacheInvalidateHeapTuple(relation
, &oldtup
);
2813 /* Clear bits in visibility map */
2814 if (all_visible_cleared
)
2815 visibilitymap_clear(relation
, BufferGetBlockNumber(buffer
));
2816 if (all_visible_cleared_new
)
2817 visibilitymap_clear(relation
, BufferGetBlockNumber(newbuf
));
2819 /* Now we can release the buffer(s) */
2820 if (newbuf
!= buffer
)
2821 ReleaseBuffer(newbuf
);
2822 ReleaseBuffer(buffer
);
2825 * If new tuple is cachable, mark it for invalidation from the caches in
2826 * case we abort. Note it is OK to do this after releasing the buffer,
2827 * because the heaptup data structure is all in local memory, not in the
2830 CacheInvalidateHeapTuple(relation
, heaptup
);
2833 * Release the lmgr tuple lock, if we had it.
2835 if (have_tuple_lock
)
2836 UnlockTuple(relation
, &(oldtup
.t_self
), ExclusiveLock
);
2838 pgstat_count_heap_update(relation
, use_hot_update
);
2841 * If heaptup is a private copy, release it. Don't forget to copy t_self
2842 * back to the caller's image, too.
2844 if (heaptup
!= newtup
)
2846 newtup
->t_self
= heaptup
->t_self
;
2847 heap_freetuple(heaptup
);
2850 bms_free(hot_attrs
);
2852 return HeapTupleMayBeUpdated
;
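/*
 * Caller-side sketch (illustrative only): a caller that is prepared for
 * concurrent updates typically inspects the failure outputs and chases the
 * replacement tuple before retrying.  "recheck_and_retry" is a placeholder
 * for caller policy (the executor, for instance, re-evaluates its quals
 * against the tuple that update_ctid points at).
 *
 *		result = heap_update(rel, &oldtid, newtup,
 *							 &update_ctid, &update_xmax,
 *							 GetCurrentCommandId(true), InvalidSnapshot,
 *							 true);
 *		switch (result)
 *		{
 *			case HeapTupleMayBeUpdated:
 *				break;		-- success; newtup->t_self is the new TID
 *			case HeapTupleUpdated:
 *				-- lost a race: update_ctid/update_xmax identify the
 *				-- replacement version to re-examine
 *				recheck_and_retry(rel, &update_ctid, update_xmax);
 *				break;
 *			default:
 *				elog(ERROR, "unexpected heap_update result");
 *		}
 */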
/*
 * Check if the specified attribute's value is the same in both given tuples.
 * Subroutine for HeapSatisfiesHOTUpdate.
 */
static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
					   HeapTuple tup1, HeapTuple tup2)
{
	Datum		value1,
				value2;
	bool		isnull1,
				isnull2;
	Form_pg_attribute att;
2870 * If it's a whole-tuple reference, say "not equal". It's not really
2871 * worth supporting this case, since it could only succeed after a no-op
2872 * update, which is hardly a case worth optimizing for.
2878 * Likewise, automatically say "not equal" for any system attribute other
2879 * than OID and tableOID; we cannot expect these to be consistent in a HOT
2880 * chain, or even to be set correctly yet in the new tuple.
2884 if (attrnum
!= ObjectIdAttributeNumber
&&
2885 attrnum
!= TableOidAttributeNumber
)
2890 * Extract the corresponding values. XXX this is pretty inefficient if
2891 * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
2892 * single heap_deform_tuple call on each tuple, instead? But that doesn't
2893 * work for system columns ...
2895 value1
= heap_getattr(tup1
, attrnum
, tupdesc
, &isnull1
);
2896 value2
= heap_getattr(tup2
, attrnum
, tupdesc
, &isnull2
);
2899 * If one value is NULL and other is not, then they are certainly not
2902 if (isnull1
!= isnull2
)
2906 * If both are NULL, they can be considered equal.
2912 * We do simple binary comparison of the two datums. This may be overly
2913 * strict because there can be multiple binary representations for the
2914 * same logical value. But we should be OK as long as there are no false
2915 * positives. Using a type-specific equality operator is messy because
2916 * there could be multiple notions of equality in different operator
2917 * classes; furthermore, we cannot safely invoke user-defined functions
2918 * while holding exclusive buffer lock.
2922 /* The only allowed system columns are OIDs, so do this */
2923 return (DatumGetObjectId(value1
) == DatumGetObjectId(value2
));
2927 Assert(attrnum
<= tupdesc
->natts
);
2928 att
= tupdesc
->attrs
[attrnum
- 1];
2929 return datumIsEqual(value1
, value2
, att
->attbyval
, att
->attlen
);
/*
 * Check if the old and new tuples represent a HOT-safe update. To be able
 * to do a HOT update, we must not have changed any columns used in index
 * definitions.
 *
 * The set of attributes to be checked is passed in (we dare not try to
 * compute it while holding exclusive buffer lock...)  NOTE that hot_attrs
 * is destructively modified!  That is OK since this is invoked at most once
 * by heap_update.
 *
 * Returns true if safe to do HOT update.
 */
static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
					   HeapTuple oldtup, HeapTuple newtup)
{
	int			attrnum;

	while ((attrnum = bms_first_member(hot_attrs)) >= 0)
	{
		/* Adjust for system attributes */
		attrnum += FirstLowInvalidHeapAttributeNumber;

		/* If the attribute value has changed, we can't do HOT update */
		if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
									oldtup, newtup))
			return false;
	}

	return true;
}
/*
 *	simple_heap_update - replace a tuple
 *
 * This routine may be used to update a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	result = heap_update(relation, otid, tup,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			break;
	}
}
/*
 *	heap_lock_tuple - lock a tuple in shared or exclusive mode
 *
 * Note that this acquires a buffer pin, which the caller must release.
 *
 * Input parameters:
 *	relation: relation containing tuple (caller must hold suitable lock)
 *	tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
 *	cid: current command ID (used for visibility test, and stored into
 *		tuple's cmax if lock is successful)
 *	mode: indicates if shared or exclusive tuple lock is desired
 *	nowait: if true, ereport rather than blocking if lock not available
 *
 * Output parameters:
 *	*tuple: all fields filled in
 *	*buffer: set to buffer holding tuple (pinned but not locked at exit)
 *	*ctid: set to tuple's t_ctid, but only in failure cases
 *	*update_xmax: set to tuple's xmax, but only in failure cases
 *
 * Function result may be:
 *	HeapTupleMayBeUpdated: lock was successfully acquired
 *	HeapTupleSelfUpdated: lock failed because tuple updated by self
 *	HeapTupleUpdated: lock failed because tuple updated by other xact
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 *
 * NOTES: because the shared-memory lock table is of finite size, but users
 * could reasonably want to lock large numbers of tuples, we do not rely on
 * the standard lock manager to store tuple-level locks over the long term.
 * Instead, a tuple is marked as locked by setting the current transaction's
 * XID as its XMAX, and setting additional infomask bits to distinguish this
 * usage from the more normal case of having deleted the tuple.  When
 * multiple transactions concurrently share-lock a tuple, the first locker's
 * XID is replaced in XMAX with a MultiTransactionId representing the set of
 * XIDs currently holding share-locks.
 *
 * When it is necessary to wait for a tuple-level lock to be released, the
 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
 * contents of the tuple's XMAX.  However, that mechanism will release all
 * waiters concurrently, so there would be a race condition as to which
 * waiter gets the tuple, potentially leading to indefinite starvation of
 * some waiters.  The possibility of share-locking makes the problem much
 * worse --- a steady stream of share-lockers can easily block an exclusive
 * locker forever.  To provide more reliable semantics about who gets a
 * tuple-level lock first, we use the standard lock manager.  The protocol
 * for waiting for a tuple-level lock is really
 *
 *		LockTuple()
 *		XactLockTableWait()
 *		mark tuple as locked by me
 *		UnlockTuple()
 *
 * When there are multiple waiters, arbitration of who is to get the lock next
 * is provided by LockTuple().  However, at most one tuple-level lock will
 * be held or awaited per backend at any time, so we don't risk overflow
 * of the lock table.  Note that incoming share-lockers are required to
 * do LockTuple as well, if there is any conflict, to ensure that they don't
 * starve out waiting exclusive-lockers.  However, if there is not any active
 * conflict for a tuple, we don't incur any extra overhead.
 */
HTSU_Result
heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
				ItemPointer ctid, TransactionId *update_xmax,
				CommandId cid, LockTupleMode mode, bool nowait)
{
3073 ItemPointer tid
= &(tuple
->t_self
);
3078 uint16 old_infomask
;
3079 uint16 new_infomask
;
3080 LOCKMODE tuple_lock_type
;
3081 bool have_tuple_lock
= false;
3083 tuple_lock_type
= (mode
== LockTupleShared
) ? ShareLock
: ExclusiveLock
;
3085 *buffer
= ReadBuffer(relation
, ItemPointerGetBlockNumber(tid
));
3086 LockBuffer(*buffer
, BUFFER_LOCK_EXCLUSIVE
);
3088 page
= BufferGetPage(*buffer
);
3089 lp
= PageGetItemId(page
, ItemPointerGetOffsetNumber(tid
));
3090 Assert(ItemIdIsNormal(lp
));
3092 tuple
->t_data
= (HeapTupleHeader
) PageGetItem(page
, lp
);
3093 tuple
->t_len
= ItemIdGetLength(lp
);
3094 tuple
->t_tableOid
= RelationGetRelid(relation
);
3097 result
= HeapTupleSatisfiesUpdate(tuple
->t_data
, cid
, *buffer
);
3099 if (result
== HeapTupleInvisible
)
3101 UnlockReleaseBuffer(*buffer
);
3102 elog(ERROR
, "attempted to lock invisible tuple");
3104 else if (result
== HeapTupleBeingUpdated
)
3106 TransactionId xwait
;
3109 /* must copy state data before unlocking buffer */
3110 xwait
= HeapTupleHeaderGetXmax(tuple
->t_data
);
3111 infomask
= tuple
->t_data
->t_infomask
;
3113 LockBuffer(*buffer
, BUFFER_LOCK_UNLOCK
);
3116 * If we wish to acquire share lock, and the tuple is already
3117 * share-locked by a multixact that includes any subtransaction of the
3118 * current top transaction, then we effectively hold the desired lock
3119 * already. We *must* succeed without trying to take the tuple lock,
3120 * else we will deadlock against anyone waiting to acquire exclusive
3121 * lock. We don't need to make any state changes in this case.
3123 if (mode
== LockTupleShared
&&
3124 (infomask
& HEAP_XMAX_IS_MULTI
) &&
3125 MultiXactIdIsCurrent((MultiXactId
) xwait
))
3127 Assert(infomask
& HEAP_XMAX_SHARED_LOCK
);
3128 /* Probably can't hold tuple lock here, but may as well check */
3129 if (have_tuple_lock
)
3130 UnlockTuple(relation
, tid
, tuple_lock_type
);
3131 return HeapTupleMayBeUpdated
;
3135 * Acquire tuple lock to establish our priority for the tuple.
3136 * LockTuple will release us when we are next-in-line for the tuple.
3137 * We must do this even if we are share-locking.
3139 * If we are forced to "start over" below, we keep the tuple lock;
3140 * this arranges that we stay at the head of the line while rechecking
3143 if (!have_tuple_lock
)
3147 if (!ConditionalLockTuple(relation
, tid
, tuple_lock_type
))
3149 (errcode(ERRCODE_LOCK_NOT_AVAILABLE
),
3150 errmsg("could not obtain lock on row in relation \"%s\"",
3151 RelationGetRelationName(relation
))));
3154 LockTuple(relation
, tid
, tuple_lock_type
);
3155 have_tuple_lock
= true;
3158 if (mode
== LockTupleShared
&& (infomask
& HEAP_XMAX_SHARED_LOCK
))
3161 * Acquiring sharelock when there's at least one sharelocker
3162 * already. We need not wait for him/them to complete.
3164 LockBuffer(*buffer
, BUFFER_LOCK_EXCLUSIVE
);
3167 * Make sure it's still a shared lock, else start over. (It's OK
3168 * if the ownership of the shared lock has changed, though.)
3170 if (!(tuple
->t_data
->t_infomask
& HEAP_XMAX_SHARED_LOCK
))
3173 else if (infomask
& HEAP_XMAX_IS_MULTI
)
3175 /* wait for multixact to end */
3178 if (!ConditionalMultiXactIdWait((MultiXactId
) xwait
))
3180 (errcode(ERRCODE_LOCK_NOT_AVAILABLE
),
3181 errmsg("could not obtain lock on row in relation \"%s\"",
3182 RelationGetRelationName(relation
))));
3185 MultiXactIdWait((MultiXactId
) xwait
);
3187 LockBuffer(*buffer
, BUFFER_LOCK_EXCLUSIVE
);
3190 * If xwait had just locked the tuple then some other xact could
3191 * update this tuple before we get to this point. Check for xmax
3192 * change, and start over if so.
3194 if (!(tuple
->t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
3195 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple
->t_data
),
3200 * You might think the multixact is necessarily done here, but not
3201 * so: it could have surviving members, namely our own xact or
3202 * other subxacts of this backend. It is legal for us to lock the
3203 * tuple in either case, however. We don't bother changing the
3204 * on-disk hint bits since we are about to overwrite the xmax
3210 /* wait for regular transaction to end */
3213 if (!ConditionalXactLockTableWait(xwait
))
3215 (errcode(ERRCODE_LOCK_NOT_AVAILABLE
),
3216 errmsg("could not obtain lock on row in relation \"%s\"",
3217 RelationGetRelationName(relation
))));
3220 XactLockTableWait(xwait
);
3222 LockBuffer(*buffer
, BUFFER_LOCK_EXCLUSIVE
);
3225 * xwait is done, but if xwait had just locked the tuple then some
3226 * other xact could update this tuple before we get to this point.
3227 * Check for xmax change, and start over if so.
3229 if ((tuple
->t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
3230 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple
->t_data
),
3234 /* Otherwise check if it committed or aborted */
3235 UpdateXmaxHintBits(tuple
->t_data
, *buffer
, xwait
);
3239 * We may lock if previous xmax aborted, or if it committed but only
3240 * locked the tuple without updating it. The case where we didn't
3241 * wait because we are joining an existing shared lock is correctly
3244 if (tuple
->t_data
->t_infomask
& (HEAP_XMAX_INVALID
|
3246 result
= HeapTupleMayBeUpdated
;
3248 result
= HeapTupleUpdated
;
3251 if (result
!= HeapTupleMayBeUpdated
)
3253 Assert(result
== HeapTupleSelfUpdated
|| result
== HeapTupleUpdated
);
3254 Assert(!(tuple
->t_data
->t_infomask
& HEAP_XMAX_INVALID
));
3255 *ctid
= tuple
->t_data
->t_ctid
;
3256 *update_xmax
= HeapTupleHeaderGetXmax(tuple
->t_data
);
3257 LockBuffer(*buffer
, BUFFER_LOCK_UNLOCK
);
3258 if (have_tuple_lock
)
3259 UnlockTuple(relation
, tid
, tuple_lock_type
);
3264 * We might already hold the desired lock (or stronger), possibly under a
3265 * different subtransaction of the current top transaction. If so, there
3266 * is no need to change state or issue a WAL record. We already handled
3267 * the case where this is true for xmax being a MultiXactId, so now check
3268 * for cases where it is a plain TransactionId.
3270 * Note in particular that this covers the case where we already hold
3271 * exclusive lock on the tuple and the caller only wants shared lock. It
3272 * would certainly not do to give up the exclusive lock.
3274 xmax
= HeapTupleHeaderGetXmax(tuple
->t_data
);
3275 old_infomask
= tuple
->t_data
->t_infomask
;
3277 if (!(old_infomask
& (HEAP_XMAX_INVALID
|
3278 HEAP_XMAX_COMMITTED
|
3279 HEAP_XMAX_IS_MULTI
)) &&
3280 (mode
== LockTupleShared
?
3281 (old_infomask
& HEAP_IS_LOCKED
) :
3282 (old_infomask
& HEAP_XMAX_EXCL_LOCK
)) &&
3283 TransactionIdIsCurrentTransactionId(xmax
))
3285 LockBuffer(*buffer
, BUFFER_LOCK_UNLOCK
);
3286 /* Probably can't hold tuple lock here, but may as well check */
3287 if (have_tuple_lock
)
3288 UnlockTuple(relation
, tid
, tuple_lock_type
);
3289 return HeapTupleMayBeUpdated
;
3293 * Compute the new xmax and infomask to store into the tuple. Note we do
3294 * not modify the tuple just yet, because that would leave it in the wrong
3295 * state if multixact.c elogs.
3297 xid
= GetCurrentTransactionId();
3299 new_infomask
= old_infomask
& ~(HEAP_XMAX_COMMITTED
|
3301 HEAP_XMAX_IS_MULTI
|
3305 if (mode
== LockTupleShared
)
3308 * If this is the first acquisition of a shared lock in the current
3309 * transaction, set my per-backend OldestMemberMXactId setting. We can
3310 * be certain that the transaction will never become a member of any
3311 * older MultiXactIds than that. (We have to do this even if we end
3312 * up just using our own TransactionId below, since some other backend
3313 * could incorporate our XID into a MultiXact immediately afterwards.)
3315 MultiXactIdSetOldestMember();
3317 new_infomask
|= HEAP_XMAX_SHARED_LOCK
;
3320 * Check to see if we need a MultiXactId because there are multiple
3323 * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
3324 * the xmax was a MultiXactId but it was not running anymore. There is
3325 * a race condition, which is that the MultiXactId may have finished
3326 * since then, but that uncommon case is handled within
3327 * MultiXactIdExpand.
3329 * There is a similar race condition possible when the old xmax was a
3330 * regular TransactionId. We test TransactionIdIsInProgress again
3331 * just to narrow the window, but it's still possible to end up
3332 * creating an unnecessary MultiXactId. Fortunately this is harmless.
3334 if (!(old_infomask
& (HEAP_XMAX_INVALID
| HEAP_XMAX_COMMITTED
)))
3336 if (old_infomask
& HEAP_XMAX_IS_MULTI
)
3339 * If the XMAX is already a MultiXactId, then we need to
3340 * expand it to include our own TransactionId.
3342 xid
= MultiXactIdExpand((MultiXactId
) xmax
, xid
);
3343 new_infomask
|= HEAP_XMAX_IS_MULTI
;
3345 else if (TransactionIdIsInProgress(xmax
))
3348 * If the XMAX is a valid TransactionId, then we need to
3349 * create a new MultiXactId that includes both the old locker
3350 * and our own TransactionId.
3352 xid
= MultiXactIdCreate(xmax
, xid
);
3353 new_infomask
|= HEAP_XMAX_IS_MULTI
;
3358 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
3359 * as running, but it finished before
3360 * TransactionIdIsInProgress() got to run. Treat it like
3361 * there's no locker in the tuple.
3368 * There was no previous locker, so just insert our own
3375 /* We want an exclusive lock on the tuple */
3376 new_infomask
|= HEAP_XMAX_EXCL_LOCK
;
3379 START_CRIT_SECTION();
3382 * Store transaction information of xact locking the tuple.
3384 * Note: Cmax is meaningless in this context, so don't set it; this avoids
3385 * possibly generating a useless combo CID.
3387 tuple
->t_data
->t_infomask
= new_infomask
;
3388 HeapTupleHeaderClearHotUpdated(tuple
->t_data
);
3389 HeapTupleHeaderSetXmax(tuple
->t_data
, xid
);
3390 /* Make sure there is no forward chain link in t_ctid */
3391 tuple
->t_data
->t_ctid
= *tid
;
3393 MarkBufferDirty(*buffer
);
3396 * XLOG stuff. You might think that we don't need an XLOG record because
3397 * there is no state change worth restoring after a crash. You would be
3398 * wrong however: we have just written either a TransactionId or a
3399 * MultiXactId that may never have been seen on disk before, and we need
3400 * to make sure that there are XLOG entries covering those ID numbers.
3401 * Else the same IDs might be re-used after a crash, which would be
3402 * disastrous if this page made it to disk before the crash. Essentially
3403 * we have to enforce the WAL log-before-data rule even in this case.
3404 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
3405 * entries for everything anyway.)
3407 if (!relation
->rd_istemp
)
3411 XLogRecData rdata
[2];
3413 xlrec
.target
.node
= relation
->rd_node
;
3414 xlrec
.target
.tid
= tuple
->t_self
;
3415 xlrec
.locking_xid
= xid
;
3416 xlrec
.xid_is_mxact
= ((new_infomask
& HEAP_XMAX_IS_MULTI
) != 0);
3417 xlrec
.shared_lock
= (mode
== LockTupleShared
);
3418 rdata
[0].data
= (char *) &xlrec
;
3419 rdata
[0].len
= SizeOfHeapLock
;
3420 rdata
[0].buffer
= InvalidBuffer
;
3421 rdata
[0].next
= &(rdata
[1]);
3423 rdata
[1].data
= NULL
;
3425 rdata
[1].buffer
= *buffer
;
3426 rdata
[1].buffer_std
= true;
3427 rdata
[1].next
= NULL
;
3429 recptr
= XLogInsert(RM_HEAP_ID
, XLOG_HEAP_LOCK
, rdata
);
3431 PageSetLSN(page
, recptr
);
3432 PageSetTLI(page
, ThisTimeLineID
);
3437 LockBuffer(*buffer
, BUFFER_LOCK_UNLOCK
);
3440 * Don't update the visibility map here. Locking a tuple doesn't change
3445 * Now that we have successfully marked the tuple as locked, we can
3446 * release the lmgr tuple lock, if we had it.
3448 if (have_tuple_lock
)
3449 UnlockTuple(relation
, tid
, tuple_lock_type
);
3451 return HeapTupleMayBeUpdated
;
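/*
 * Usage sketch (illustrative only): acquiring the equivalent of a row-level
 * share lock on a known TID.  The buffer pin returned in "buf" must be
 * released by the caller; all variable names here are placeholders.
 *
 *		HeapTupleData	tup;
 *		Buffer			buf;
 *		ItemPointerData	ctid;
 *		TransactionId	xmax;
 *		HTSU_Result		res;
 *
 *		tup.t_self = *tid;
 *		res = heap_lock_tuple(rel, &tup, &buf, &ctid, &xmax,
 *							  GetCurrentCommandId(true),
 *							  LockTupleShared, false);
 *		ReleaseBuffer(buf);
 *
 *		if (res == HeapTupleUpdated)
 *			-- the row was deleted or replaced; ctid/xmax locate the
 *			-- replacement version, if any
 */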
/*
 *	heap_inplace_update - update a tuple "in place" (ie, overwrite it)
 *
 * Overwriting violates both MVCC and transactional safety, so the uses
 * of this function in Postgres are extremely limited.  Nonetheless we
 * find some places to use it.
 *
 * The tuple cannot change size, and therefore it's reasonable to assume
 * that its null bitmap (if any) doesn't change either.  So we just
 * overwrite the data portion of the tuple without touching the null
 * bitmap or any of the header fields.
 *
 * tuple is an in-memory tuple structure containing the data to be written
 * over the target tuple.  Also, tuple->t_self identifies the target tuple.
 */
void
heap_inplace_update(Relation relation, HeapTuple tuple)
{
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	uint32		oldlen;
	uint32		newlen;

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	page = (Page) BufferGetPage(buffer);

	offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(ERROR, "heap_inplace_update: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
	newlen = tuple->t_len - tuple->t_data->t_hoff;
	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
		elog(ERROR, "heap_inplace_update: wrong tuple length");

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	memcpy((char *) htup + htup->t_hoff,
		   (char *) tuple->t_data + tuple->t_data->t_hoff,
		   newlen);

	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (!relation->rd_istemp)
	{
		xl_heap_inplace xlrec;
		XLogRecPtr	recptr;
		XLogRecData rdata[2];

		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = tuple->t_self;

		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapInplace;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		rdata[1].data = (char *) htup + htup->t_hoff;
		rdata[1].len = newlen;
		rdata[1].buffer = buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = NULL;

		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);

		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	UnlockReleaseBuffer(buffer);

	/* Send out shared cache inval if necessary */
	if (!IsBootstrapProcessingMode())
		CacheInvalidateHeapTuple(relation, tuple);
}
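/*
 * Usage sketch (illustrative only): the classic caller is VACUUM updating
 * pg_class statistics without changing the tuple's visibility.  The
 * overwritten data must keep exactly the same length.  "classRel", "relid",
 * "num_pages" and "num_tuples" are placeholders for caller state.
 *
 *		ctup = SearchSysCacheCopy(RELOID,
 *								  ObjectIdGetDatum(relid),
 *								  0, 0, 0);
 *		pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *		pgcform->relpages = (int32) num_pages;
 *		pgcform->reltuples = (float4) num_tuples;
 *		heap_inplace_update(classRel, ctup);
 */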
/*
 * heap_freeze_tuple
 *
 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
 * are older than the specified cutoff XID.  If so, replace them with
 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
 * TRUE.  Return FALSE if nothing was changed.
 *
 * It is assumed that the caller has checked the tuple with
 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
 * (else we should be removing the tuple, not freezing it).
 *
 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
 * XID older than it could neither be running nor seen as running by any
 * open transaction.  This ensures that the replacement will not change
 * anyone's idea of the tuple state.  Also, since we assume the tuple is
 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
 * to assume that it is either committed good or aborted, as appropriate;
 * so we need no external state checks to decide what to do.  (This is good
 * because this function is applied during WAL recovery, when we don't have
 * access to any such state, and can't depend on the hint bits to be set.)
 *
 * In lazy VACUUM, we call this while initially holding only a shared lock
 * on the tuple's buffer.  If any change is needed, we trade that in for an
 * exclusive lock before making the change.  Caller should pass the buffer ID
 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
 *
 * Note: it might seem we could make the changes without exclusive lock, since
 * TransactionId read/write is assumed atomic anyway.  However there is a race
 * condition: someone who just fetched an old XID that we overwrite here could
 * conceivably not finish checking the XID against pg_clog before we finish
 * the VACUUM and perhaps truncate off the part of pg_clog he needs.  Getting
 * exclusive lock ensures no other backend is in process of checking the
 * tuple status.  Also, getting exclusive lock makes it safe to adjust the
 * infomask bits.
 */
bool
heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
				  Buffer buf)
{
	bool		changed = false;
	TransactionId xid;
3588 xid
= HeapTupleHeaderGetXmin(tuple
);
3589 if (TransactionIdIsNormal(xid
) &&
3590 TransactionIdPrecedes(xid
, cutoff_xid
))
3592 if (buf
!= InvalidBuffer
)
3594 /* trade in share lock for exclusive lock */
3595 LockBuffer(buf
, BUFFER_LOCK_UNLOCK
);
3596 LockBuffer(buf
, BUFFER_LOCK_EXCLUSIVE
);
3597 buf
= InvalidBuffer
;
3599 HeapTupleHeaderSetXmin(tuple
, FrozenTransactionId
);
3602 * Might as well fix the hint bits too; usually XMIN_COMMITTED will
3603 * already be set here, but there's a small chance not.
3605 Assert(!(tuple
->t_infomask
& HEAP_XMIN_INVALID
));
3606 tuple
->t_infomask
|= HEAP_XMIN_COMMITTED
;
3611 * When we release shared lock, it's possible for someone else to change
3612 * xmax before we get the lock back, so repeat the check after acquiring
3613 * exclusive lock. (We don't need this pushup for xmin, because only
3614 * VACUUM could be interested in changing an existing tuple's xmin, and
3615 * there's only one VACUUM allowed on a table at a time.)
3618 if (!(tuple
->t_infomask
& HEAP_XMAX_IS_MULTI
))
3620 xid
= HeapTupleHeaderGetXmax(tuple
);
3621 if (TransactionIdIsNormal(xid
) &&
3622 TransactionIdPrecedes(xid
, cutoff_xid
))
3624 if (buf
!= InvalidBuffer
)
3626 /* trade in share lock for exclusive lock */
3627 LockBuffer(buf
, BUFFER_LOCK_UNLOCK
);
3628 LockBuffer(buf
, BUFFER_LOCK_EXCLUSIVE
);
3629 buf
= InvalidBuffer
;
3630 goto recheck_xmax
; /* see comment above */
3632 HeapTupleHeaderSetXmax(tuple
, InvalidTransactionId
);
3635 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
3636 * + LOCKED. Normalize to INVALID just to be sure no one gets
3639 tuple
->t_infomask
&= ~HEAP_XMAX_COMMITTED
;
3640 tuple
->t_infomask
|= HEAP_XMAX_INVALID
;
3641 HeapTupleHeaderClearHotUpdated(tuple
);
3648 * XXX perhaps someday we should zero out very old MultiXactIds here?
3650 * The only way a stale MultiXactId could pose a problem is if a
3651 * tuple, having once been multiply-share-locked, is not touched by
3652 * any vacuum or attempted lock or deletion for just over 4G MultiXact
3653 * creations, and then in the probably-narrow window where its xmax
3654 * is again a live MultiXactId, someone tries to lock or delete it.
3655 * Even then, another share-lock attempt would work fine. An
3656 * exclusive-lock or delete attempt would face unexpected delay, or
3657 * in the very worst case get a deadlock error. This seems an
3658 * extremely low-probability scenario with minimal downside even if
3659 * it does happen, so for now we don't do the extra bookkeeping that
3660 * would be needed to clean out MultiXactIds.
3666 * Although xvac per se could only be set by VACUUM, it shares physical
3667 * storage space with cmax, and so could be wiped out by someone setting
3668 * xmax. Hence recheck after changing lock, same as for xmax itself.
3671 if (tuple
->t_infomask
& HEAP_MOVED
)
3673 xid
= HeapTupleHeaderGetXvac(tuple
);
3674 if (TransactionIdIsNormal(xid
) &&
3675 TransactionIdPrecedes(xid
, cutoff_xid
))
3677 if (buf
!= InvalidBuffer
)
3679 /* trade in share lock for exclusive lock */
3680 LockBuffer(buf
, BUFFER_LOCK_UNLOCK
);
3681 LockBuffer(buf
, BUFFER_LOCK_EXCLUSIVE
);
3682 buf
= InvalidBuffer
;
3683 goto recheck_xvac
; /* see comment above */
3687 * If a MOVED_OFF tuple is not dead, the xvac transaction must
3688 * have failed; whereas a non-dead MOVED_IN tuple must mean the
3689 * xvac transaction succeeded.
3691 if (tuple
->t_infomask
& HEAP_MOVED_OFF
)
3692 HeapTupleHeaderSetXvac(tuple
, InvalidTransactionId
);
3694 HeapTupleHeaderSetXvac(tuple
, FrozenTransactionId
);
3697 * Might as well fix the hint bits too; usually XMIN_COMMITTED
3698 * will already be set here, but there's a small chance not.
3700 Assert(!(tuple
->t_infomask
& HEAP_XMIN_INVALID
));
3701 tuple
->t_infomask
|= HEAP_XMIN_COMMITTED
;
/*
 *	heap_markpos - mark scan position
 */
void
heap_markpos(HeapScanDesc scan)
{
	/* Note: no locking manipulations needed */

	if (scan->rs_ctup.t_data != NULL)
	{
		scan->rs_mctid = scan->rs_ctup.t_self;
		if (scan->rs_pageatatime)
			scan->rs_mindex = scan->rs_cindex;
	}
	else
		ItemPointerSetInvalid(&scan->rs_mctid);
}
/*
 *	heap_restrpos - restore position to marked location
 */
void
heap_restrpos(HeapScanDesc scan)
{
	/* XXX no amrestrpos checking that ammarkpos called */

	if (!ItemPointerIsValid(&scan->rs_mctid))
	{
		scan->rs_ctup.t_data = NULL;

		/*
		 * unpin scan buffers
		 */
		if (BufferIsValid(scan->rs_cbuf))
			ReleaseBuffer(scan->rs_cbuf);
		scan->rs_cbuf = InvalidBuffer;
		scan->rs_cblock = InvalidBlockNumber;
		scan->rs_inited = false;
	}
	else
	{
		/*
		 * If we reached end of scan, rs_inited will now be false.  We must
		 * reset it to true to keep heapgettup from doing the wrong thing.
		 */
		scan->rs_inited = true;
		scan->rs_ctup.t_self = scan->rs_mctid;
		if (scan->rs_pageatatime)
		{
			scan->rs_cindex = scan->rs_mindex;
			heapgettup_pagemode(scan,
								NoMovementScanDirection,
								0,		/* needn't recheck scan keys */
								NULL);
		}
		else
			heapgettup(scan,
					   NoMovementScanDirection,
					   0,		/* needn't recheck scan keys */
					   NULL);
	}
}
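/*
 * Mark/restore sketch (illustrative only): a scan consumer remembers the
 * current position and later rewinds to it.  "rel" and "snapshot" are
 * placeholders.
 *
 *		scan = heap_beginscan(rel, snapshot, 0, NULL);
 *		tuple = heap_getnext(scan, ForwardScanDirection);
 *		heap_markpos(scan);			-- remember this tuple's position
 *		... read further tuples ...
 *		heap_restrpos(scan);		-- back to the marked tuple
 *		heap_endscan(scan);
 */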
/*
 * Perform XLogInsert for a heap-clean operation.  Caller must already
 * have modified the buffer and marked it dirty.
 *
 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
 * zero-based tuple indexes.  Now they are one-based like other uses
 * of OffsetNumber.
 */
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer,
			   OffsetNumber *redirected, int nredirected,
			   OffsetNumber *nowdead, int ndead,
			   OffsetNumber *nowunused, int nunused,
			   bool redirect_move)
{
3790 xl_heap_clean xlrec
;
3793 XLogRecData rdata
[4];
3795 /* Caller should not call me on a temp relation */
3796 Assert(!reln
->rd_istemp
);
3798 xlrec
.node
= reln
->rd_node
;
3799 xlrec
.block
= BufferGetBlockNumber(buffer
);
3800 xlrec
.nredirected
= nredirected
;
3801 xlrec
.ndead
= ndead
;
3803 rdata
[0].data
= (char *) &xlrec
;
3804 rdata
[0].len
= SizeOfHeapClean
;
3805 rdata
[0].buffer
= InvalidBuffer
;
3806 rdata
[0].next
= &(rdata
[1]);
3809 * The OffsetNumber arrays are not actually in the buffer, but we pretend
3810 * that they are. When XLogInsert stores the whole buffer, the offset
3811 * arrays need not be stored too. Note that even if all three arrays are
3812 * empty, we want to expose the buffer as a candidate for whole-page
3813 * storage, since this record type implies a defragmentation operation
3814 * even if no item pointers changed state.
3816 if (nredirected
> 0)
3818 rdata
[1].data
= (char *) redirected
;
3819 rdata
[1].len
= nredirected
* sizeof(OffsetNumber
) * 2;
3823 rdata
[1].data
= NULL
;
3826 rdata
[1].buffer
= buffer
;
3827 rdata
[1].buffer_std
= true;
3828 rdata
[1].next
= &(rdata
[2]);
3832 rdata
[2].data
= (char *) nowdead
;
3833 rdata
[2].len
= ndead
* sizeof(OffsetNumber
);
3837 rdata
[2].data
= NULL
;
3840 rdata
[2].buffer
= buffer
;
3841 rdata
[2].buffer_std
= true;
3842 rdata
[2].next
= &(rdata
[3]);
3846 rdata
[3].data
= (char *) nowunused
;
3847 rdata
[3].len
= nunused
* sizeof(OffsetNumber
);
3851 rdata
[3].data
= NULL
;
3854 rdata
[3].buffer
= buffer
;
3855 rdata
[3].buffer_std
= true;
3856 rdata
[3].next
= NULL
;
3858 info
= redirect_move
? XLOG_HEAP2_CLEAN_MOVE
: XLOG_HEAP2_CLEAN
;
3859 recptr
= XLogInsert(RM_HEAP2_ID
, info
, rdata
);
/*
 * Perform XLogInsert for a heap-freeze operation.  Caller must already
 * have modified the buffer and marked it dirty.
 */
XLogRecPtr
log_heap_freeze(Relation reln, Buffer buffer,
				TransactionId cutoff_xid,
				OffsetNumber *offsets, int offcnt)
{
	xl_heap_freeze xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* Caller should not call me on a temp relation */
	Assert(!reln->rd_istemp);
	/* nor when there are no tuples to freeze */
	Assert(offcnt > 0);

	xlrec.node = reln->rd_node;
	xlrec.block = BufferGetBlockNumber(buffer);
	xlrec.cutoff_xid = cutoff_xid;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapFreeze;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	/*
	 * The tuple-offsets array is not actually in the buffer, but pretend that
	 * it is.  When XLogInsert stores the whole buffer, the offsets array need
	 * not be stored too.
	 */
	rdata[1].data = (char *) offsets;
	rdata[1].len = offcnt * sizeof(OffsetNumber);
	rdata[1].buffer = buffer;
	rdata[1].buffer_std = true;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);

	return recptr;
}
/*
 * Perform XLogInsert for a heap-update operation.  Caller must already
 * have modified the buffer(s) and marked them dirty.
 */
static XLogRecPtr
log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
				Buffer newbuf, HeapTuple newtup, bool move)
{
3916 * Note: xlhdr is declared to have adequate size and correct alignment for
3917 * an xl_heap_header. However the two tids, if present at all, will be
3918 * packed in with no wasted space after the xl_heap_header; they aren't
3919 * necessarily aligned as implied by this struct declaration.
3927 int hsize
= SizeOfHeapHeader
;
3928 xl_heap_update xlrec
;
3931 XLogRecData rdata
[4];
3932 Page page
= BufferGetPage(newbuf
);
3934 /* Caller should not call me on a temp relation */
3935 Assert(!reln
->rd_istemp
);
3939 Assert(!HeapTupleIsHeapOnly(newtup
));
3940 info
= XLOG_HEAP_MOVE
;
3942 else if (HeapTupleIsHeapOnly(newtup
))
3943 info
= XLOG_HEAP_HOT_UPDATE
;
3945 info
= XLOG_HEAP_UPDATE
;
3947 xlrec
.target
.node
= reln
->rd_node
;
3948 xlrec
.target
.tid
= from
;
3949 xlrec
.all_visible_cleared
= PageIsAllVisible(BufferGetPage(oldbuf
));
3950 xlrec
.newtid
= newtup
->t_self
;
3951 xlrec
.new_all_visible_cleared
= PageIsAllVisible(BufferGetPage(newbuf
));
3953 rdata
[0].data
= (char *) &xlrec
;
3954 rdata
[0].len
= SizeOfHeapUpdate
;
3955 rdata
[0].buffer
= InvalidBuffer
;
3956 rdata
[0].next
= &(rdata
[1]);
3958 rdata
[1].data
= NULL
;
3960 rdata
[1].buffer
= oldbuf
;
3961 rdata
[1].buffer_std
= true;
3962 rdata
[1].next
= &(rdata
[2]);
3964 xlhdr
.hdr
.t_infomask2
= newtup
->t_data
->t_infomask2
;
3965 xlhdr
.hdr
.t_infomask
= newtup
->t_data
->t_infomask
;
3966 xlhdr
.hdr
.t_hoff
= newtup
->t_data
->t_hoff
;
3967 if (move
) /* remember xmax & xmin */
3969 TransactionId xid
[2]; /* xmax, xmin */
3971 if (newtup
->t_data
->t_infomask
& (HEAP_XMAX_INVALID
| HEAP_IS_LOCKED
))
3972 xid
[0] = InvalidTransactionId
;
3974 xid
[0] = HeapTupleHeaderGetXmax(newtup
->t_data
);
3975 xid
[1] = HeapTupleHeaderGetXmin(newtup
->t_data
);
3976 memcpy((char *) &xlhdr
+ hsize
,
3978 2 * sizeof(TransactionId
));
3979 hsize
+= 2 * sizeof(TransactionId
);
3983 * As with insert records, we need not store the rdata[2] segment if we
3984 * decide to store the whole buffer instead.
3986 rdata
[2].data
= (char *) &xlhdr
;
3987 rdata
[2].len
= hsize
;
3988 rdata
[2].buffer
= newbuf
;
3989 rdata
[2].buffer_std
= true;
3990 rdata
[2].next
= &(rdata
[3]);
3992 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
3993 rdata
[3].data
= (char *) newtup
->t_data
+ offsetof(HeapTupleHeaderData
, t_bits
);
3994 rdata
[3].len
= newtup
->t_len
- offsetof(HeapTupleHeaderData
, t_bits
);
3995 rdata
[3].buffer
= newbuf
;
3996 rdata
[3].buffer_std
= true;
3997 rdata
[3].next
= NULL
;
3999 /* If new tuple is the single and first tuple on page... */
4000 if (ItemPointerGetOffsetNumber(&(newtup
->t_self
)) == FirstOffsetNumber
&&
4001 PageGetMaxOffsetNumber(page
) == FirstOffsetNumber
)
4003 info
|= XLOG_HEAP_INIT_PAGE
;
4004 rdata
[2].buffer
= rdata
[3].buffer
= InvalidBuffer
;
4007 recptr
= XLogInsert(RM_HEAP_ID
, info
, rdata
);
/*
 * Perform XLogInsert for a heap-move operation.  Caller must already
 * have modified the buffers and marked them dirty.
 */
XLogRecPtr
log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
			  Buffer newbuf, HeapTuple newtup)
{
	return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
}
/*
 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL.  Caller is responsible
 * for writing the page to disk after calling this routine.
 *
 * Note: all current callers build pages in private memory and write them
 * directly to smgr, rather than using bufmgr.  Therefore there is no need
 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
 * the critical section.
 *
 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
 * not do anything that assumes we are touching a heap.
 */
XLogRecPtr
log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
			Page page)
{
	xl_heap_newpage xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* NO ELOG(ERROR) from here till newpage op is logged */
	START_CRIT_SECTION();

	xlrec.node = *rnode;
	xlrec.forknum = forkNum;
	xlrec.blkno = blkno;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapNewpage;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = (char *) page;
	rdata[1].len = BLCKSZ;
	rdata[1].buffer = InvalidBuffer;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);

	PageSetLSN(page, recptr);
	PageSetTLI(page, ThisTimeLineID);

	END_CRIT_SECTION();

	return recptr;
}

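/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * routine that copies a relation block by block typically fills a private
 * page buffer, WAL-logs it with log_newpage when WAL is required, and then
 * writes it straight through smgr; that is the division of labor described
 * in the header comment above.  Here "dst", "forkNum", "blkno", "page" and
 * "use_wal" are assumed local variables:
 *
 *		if (use_wal)
 *			log_newpage(&dst->smgr_rnode, forkNum, blkno, page);
 *		smgrextend(dst, forkNum, blkno, (char *) page, false);
 */
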
/*
 * Handles CLEAN and CLEAN_MOVE record types
 */
static void
heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
	xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber *end;
	OffsetNumber *redirected;
	OffsetNumber *nowdead;
	OffsetNumber *nowunused;
	int			nredirected;
	int			ndead;
	int			nunused;
	Size		freespace;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	nredirected = xlrec->nredirected;
	ndead = xlrec->ndead;
	end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
	redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
	nowdead = redirected + (nredirected * 2);
	nowunused = nowdead + ndead;
	nunused = (end - nowunused);
	Assert(nunused >= 0);

	/* Update all item pointers per the record, and repair fragmentation */
	heap_page_prune_execute(buffer,
							redirected, nredirected,
							nowdead, ndead,
							nowunused, nunused,
							clean_move);

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	/*
	 * Note: we don't worry about updating the page's prunability hints. At
	 * worst this will cause an extra prune cycle to occur soon.
	 */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * Update the FSM as well.
	 *
	 * XXX: We don't get here if the page was restored from full page image.
	 * We don't bother to update the FSM in that case, it doesn't need to be
	 * totally accurate anyway.
	 */
	XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
}

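/*
 * For reference (added commentary, not original to this file): the variable-
 * length portion of an xl_heap_clean record, as decoded above, is laid out
 * as three back-to-back OffsetNumber arrays following the fixed header:
 *
 *		redirected[2 * nredirected]		(old/new offset pairs)
 *		nowdead[ndead]
 *		nowunused[]						(extends to record->xl_len)
 *
 * which is why nowdead starts at redirected + nredirected * 2 and the count
 * of now-unused items is derived from the record length rather than stored.
 */
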
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
	TransactionId cutoff_xid = xlrec->cutoff_xid;
	Buffer		buffer;
	Page		page;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	if (record->xl_len > SizeOfHeapFreeze)
	{
		OffsetNumber *offsets;
		OffsetNumber *offsets_end;

		offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
		offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

		while (offsets < offsets_end)
		{
			/* offsets[] entries are one-based */
			ItemId		lp = PageGetItemId(page, *offsets);
			HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

			(void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
			offsets++;
		}
	}

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;

	/*
	 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
	 * not do anything that assumes we are touching a heap.
	 */
	buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, true);
	Assert(BufferIsValid(buffer));
	page = (Page) BufferGetPage(buffer);

	Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
	memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, blkno);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_delete_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, record->xl_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	HeapTupleHeader htup;
	xl_heap_header xlhdr;
	uint32		newlen;
	Size		freespace;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, blkno);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_insert_redo: invalid max offset number");

	newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapInsert,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;
	HeapTupleHeaderSetXmin(htup, record->xl_xid);
	HeapTupleHeaderSetCmin(htup, FirstCommandId);
	htup->t_ctid = xlrec->target.tid;

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_insert_redo: failed to add tuple");

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%. We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * XXX: We don't get here if the page was restored from full page image.
	 * We don't bother to update the FSM in that case, it doesn't need to be
	 * totally accurate anyway.
	 */
	if (freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
}

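/*
 * Worked example (added note): with the default BLCKSZ of 8192 bytes, the
 * "less than 20%" test above, freespace < BLCKSZ / 5, reports the page to
 * the FSM only when fewer than 8192 / 5 = 1638 bytes remain free after the
 * insert has been replayed.
 */
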
/*
 * Handles UPDATE, HOT_UPDATE & MOVE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
{
	xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
	Buffer		buffer;
	bool		samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
							ItemPointerGetBlockNumber(&(xlrec->target.tid)));
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	xl_heap_header xlhdr;
	int			hsize;
	uint32		newlen;
	Size		freespace;

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln,
							ItemPointerGetBlockNumber(&xlrec->target.tid));
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
	{
		if (samepage)
			return;				/* backup block covered both changes */
		goto newt;
	}

	/* Deal with old tuple version */

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		goto newt;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		if (samepage)
			return;
		goto newt;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_update_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	if (move)
	{
		htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
							  HEAP_XMIN_INVALID |
							  HEAP_MOVED_IN);
		htup->t_infomask |= HEAP_MOVED_OFF;
		HeapTupleHeaderClearHotUpdated(htup);
		HeapTupleHeaderSetXvac(htup, record->xl_xid);
		/* Make sure there is no forward chain link in t_ctid */
		htup->t_ctid = xlrec->target.tid;
	}
	else
	{
		htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
							  HEAP_XMAX_INVALID |
							  HEAP_XMAX_IS_MULTI |
							  HEAP_IS_LOCKED |
							  HEAP_MOVED);
		if (hot_update)
			HeapTupleHeaderSetHotUpdated(htup);
		else
			HeapTupleHeaderClearHotUpdated(htup);
		HeapTupleHeaderSetXmax(htup, record->xl_xid);
		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
		/* Set forward chain link in t_ctid */
		htup->t_ctid = xlrec->newtid;
	}

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/*
	 * this test is ugly, but necessary to avoid thinking that insert change
	 * is already applied
	 */
	if (samepage)
		goto newsame;

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/* Deal with new tuple */

newt:;

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->new_all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, ItemPointerGetBlockNumber(&xlrec->newtid));
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_2)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

newsame:;

	offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_update_redo: invalid max offset number");

	hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
	if (move)
		hsize += (2 * sizeof(TransactionId));

	newlen = record->xl_len - hsize;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapUpdate,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + hsize,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;

	if (move)
	{
		TransactionId xid[2];	/* xmax, xmin */

		memcpy((char *) xid,
			   (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
			   2 * sizeof(TransactionId));
		HeapTupleHeaderSetXmin(htup, xid[1]);
		HeapTupleHeaderSetXmax(htup, xid[0]);
		HeapTupleHeaderSetXvac(htup, record->xl_xid);
	}
	else
	{
		HeapTupleHeaderSetXmin(htup, record->xl_xid);
		HeapTupleHeaderSetCmin(htup, FirstCommandId);
	}
	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->newtid;

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_update_redo: failed to add tuple");

	if (xlrec->new_all_visible_cleared)
		PageClearAllVisible(page);

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%. We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * However, don't update the FSM on HOT updates, because after crash
	 * recovery, either the old or the new tuple will certainly be dead and
	 * prunable. After pruning, the page will have roughly as much free space
	 * as it did before the update, assuming the new tuple is about the same
	 * size as the old one.
	 *
	 * XXX: We don't get here if the page was restored from full page image.
	 * We don't bother to update the FSM in that case, it doesn't need to be
	 * totally accurate anyway.
	 */
	if (!hot_update && freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node,
					 ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
}

static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_lock_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	if (xlrec->xid_is_mxact)
		htup->t_infomask |= HEAP_XMAX_IS_MULTI;
	if (xlrec->shared_lock)
		htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
	else
		htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}

static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	uint32		oldlen;
	uint32		newlen;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_inplace_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
	newlen = record->xl_len - SizeOfHeapInplace;
	if (oldlen != newlen)
		elog(PANIC, "heap_inplace_redo: wrong tuple length");

	memcpy((char *) htup + htup->t_hoff,
		   (char *) xlrec + SizeOfHeapInplace,
		   newlen);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}

void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	RestoreBkpBlocks(lsn, record, false);

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP_INSERT:
			heap_xlog_insert(lsn, record);
			break;
		case XLOG_HEAP_DELETE:
			heap_xlog_delete(lsn, record);
			break;
		case XLOG_HEAP_UPDATE:
			heap_xlog_update(lsn, record, false, false);
			break;
		case XLOG_HEAP_MOVE:
			heap_xlog_update(lsn, record, true, false);
			break;
		case XLOG_HEAP_HOT_UPDATE:
			heap_xlog_update(lsn, record, false, true);
			break;
		case XLOG_HEAP_NEWPAGE:
			heap_xlog_newpage(lsn, record);
			break;
		case XLOG_HEAP_LOCK:
			heap_xlog_lock(lsn, record);
			break;
		case XLOG_HEAP_INPLACE:
			heap_xlog_inplace(lsn, record);
			break;
		default:
			elog(PANIC, "heap_redo: unknown op code %u", info);
	}
}

void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP2_FREEZE:
			RestoreBkpBlocks(lsn, record, false);
			heap_xlog_freeze(lsn, record);
			break;
		case XLOG_HEAP2_CLEAN:
			RestoreBkpBlocks(lsn, record, true);
			heap_xlog_clean(lsn, record, false);
			break;
		case XLOG_HEAP2_CLEAN_MOVE:
			RestoreBkpBlocks(lsn, record, true);
			heap_xlog_clean(lsn, record, true);
			break;
		default:
			elog(PANIC, "heap2_redo: unknown op code %u", info);
	}
}

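/*
 * Illustrative context (added commentary): heap_redo/heap_desc and
 * heap2_redo/heap2_desc are not called directly; recovery dispatches to them
 * through the resource-manager table in access/transam/rmgr.c, whose heap
 * entries look roughly like
 *
 *		{"Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL},
 *		{"Heap", heap_redo, heap_desc, NULL, NULL, NULL},
 *
 * so records written above with RM_HEAP_ID (or RM_HEAP2_ID) find their way
 * back to these routines at redo time.
 */
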
static void
out_target(StringInfo buf, xl_heaptid *target)
{
	appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
			 target->node.spcNode, target->node.dbNode, target->node.relNode,
					 ItemPointerGetBlockNumber(&(target->tid)),
					 ItemPointerGetOffsetNumber(&(target->tid)));
}

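/*
 * Example output (hypothetical values, added for illustration): a heap
 * insert record rendered through heap_desc/out_target looks like
 *
 *		insert: rel 1663/16384/16385; tid 0/1
 *
 * i.e. tablespace, database and relation OIDs, then the block number and
 * offset number of the target tuple.
 */
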
void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP_INSERT)
	{
		xl_heap_insert *xlrec = (xl_heap_insert *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "insert(init): ");
		else
			appendStringInfo(buf, "insert: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_DELETE)
	{
		xl_heap_delete *xlrec = (xl_heap_delete *) rec;

		appendStringInfo(buf, "delete: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "update(init): ");
		else
			appendStringInfo(buf, "update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_MOVE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "move(init): ");
		else
			appendStringInfo(buf, "move: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_HOT_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)		/* can this case happen? */
			appendStringInfo(buf, "hot_update(init): ");
		else
			appendStringInfo(buf, "hot_update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_NEWPAGE)
	{
		xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

		appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->blkno);
	}
	else if (info == XLOG_HEAP_LOCK)
	{
		xl_heap_lock *xlrec = (xl_heap_lock *) rec;

		if (xlrec->shared_lock)
			appendStringInfo(buf, "shared_lock: ");
		else
			appendStringInfo(buf, "exclusive_lock: ");
		if (xlrec->xid_is_mxact)
			appendStringInfo(buf, "mxid ");
		else
			appendStringInfo(buf, "xid ");
		appendStringInfo(buf, "%u ", xlrec->locking_xid);
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_INPLACE)
	{
		xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

		appendStringInfo(buf, "inplace: ");
		out_target(buf, &(xlrec->target));
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}

void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP2_FREEZE)
	{
		xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

		appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block,
						 xlrec->cutoff_xid);
	}
	else if (info == XLOG_HEAP2_CLEAN)
	{
		xl_heap_clean *xlrec = (xl_heap_clean *) rec;

		appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block);
	}
	else if (info == XLOG_HEAP2_CLEAN_MOVE)
	{
		xl_heap_clean *xlrec = (xl_heap_clean *) rec;

		appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block);
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}

/*
 *	heap_sync		- sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and it's not a temp relation, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
	/* temp tables never need fsync */
	if (rel->rd_istemp)
		return;

	/* main heap */
	FlushRelationBuffers(rel);
	/* FlushRelationBuffers will have opened rd_smgr */
	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);

	/* FSM is not critical, don't bother syncing it */

	/* toast heap, if any */
	if (OidIsValid(rel->rd_rel->reltoastrelid))
	{
		Relation	toastrel;

		toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
		FlushRelationBuffers(toastrel);
		smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
		heap_close(toastrel, AccessShareLock);
	}
}