/*-------------------------------------------------------------------------
 *
 * hio.c
 *	  POSTGRES heap access method input/output code.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"


/*
 * RelationPutHeapTuple - place tuple at specified page
 *
 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
 *
 * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
 */
void
RelationPutHeapTuple(Relation relation,
					 Buffer buffer,
					 HeapTuple tuple)
{
	Page		pageHeader;
	OffsetNumber offnum;
	ItemId		itemId;
	Item		item;

	/* Add the tuple to the page */
	pageHeader = BufferGetPage(buffer);

	offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
						 tuple->t_len, InvalidOffsetNumber, false, true);

	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "failed to add tuple to page");

	/* Update tuple->t_self to the actual position where it was stored */
	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);

	/* Insert the correct position into CTID of the stored tuple, too */
	itemId = PageGetItemId(pageHeader, offnum);
	item = PageGetItem(pageHeader, itemId);
	((HeapTupleHeader) item)->t_ctid = tuple->t_self;
}
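
/*
 * A minimal usage sketch (a hypothetical caller, modeled loosely on
 * heap_insert in heapam.c): pin and exclusive-lock a suitable page, place
 * the tuple, and mark the buffer dirty, roughly:
 *
 *		buffer = RelationGetBufferForTuple(relation, tuple->t_len,
 *										   InvalidBuffer, 0, NULL);
 *		RelationPutHeapTuple(relation, buffer, tuple);
 *		MarkBufferDirty(buffer);
 *		... WAL-log the insertion ...
 *		UnlockReleaseBuffer(buffer);
 *
 * The real heap_insert wraps the middle steps in a critical section.
 */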

/*
 * Read in a buffer, using bulk-insert strategy if bistate isn't NULL.
 */
static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
			 BulkInsertState bistate)
{
	Buffer		buffer;

	/* If not bulk-insert, exactly like ReadBuffer */
	if (!bistate)
		return ReadBuffer(relation, targetBlock);

	/* If we have the desired block already pinned, re-pin and return it */
	if (bistate->current_buf != InvalidBuffer)
	{
		if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
		{
			IncrBufferRefCount(bistate->current_buf);
			return bistate->current_buf;
		}
		/* ... else drop the old buffer */
		ReleaseBuffer(bistate->current_buf);
		bistate->current_buf = InvalidBuffer;
	}

	/* Perform a read using the buffer strategy */
	buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
								RBM_NORMAL, bistate->strategy);

	/* Save the selected block as target for future inserts */
	IncrBufferRefCount(buffer);
	bistate->current_buf = buffer;

	return buffer;
}
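
/*
 * A sketch of the bulk-insert pattern this supports, assuming the helpers
 * GetBulkInsertState/FreeBulkInsertState from heapam.c (this is how COPY
 * FROM drives it): one BulkInsertState is reused across many insertions,
 * then freed:
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *
 *		... for each row ...
 *			heap_insert(relation, tuple, cid, 0, bistate);
 *
 *		FreeBulkInsertState(bistate);
 *
 * Freeing the state drops the pin kept in bistate->current_buf and releases
 * the BULKWRITE strategy ring.
 */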

/*
 * RelationGetBufferForTuple
 *
 *	Returns pinned and exclusive-locked buffer of a page in given relation
 *	with free space >= given len.
 *
 *	If otherBuffer is not InvalidBuffer, then it references a previously
 *	pinned buffer of another page in the same relation; on return, this
 *	buffer will also be exclusive-locked.  (This case is used by heap_update;
 *	the otherBuffer contains the tuple being updated.)
 *
 *	The reason for passing otherBuffer is that if two backends are doing
 *	concurrent heap_update operations, a deadlock could occur if they try
 *	to lock the same two buffers in opposite orders.  To ensure that this
 *	can't happen, we impose the rule that buffers of a relation must be
 *	locked in increasing page number order.  This is most conveniently done
 *	by having RelationGetBufferForTuple lock them both, with suitable care
 *	for ordering.
 *
 *	NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 *	same buffer we select for insertion of the new tuple (this could only
 *	happen if space is freed in that page after heap_update finds there's not
 *	enough there).  In that case, the page will be pinned and locked only once.
 *
 *	We normally use FSM to help us find free space.  However,
 *	if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 *	the end of the relation if the tuple won't fit on the current target page.
 *	This can save some cycles when we know the relation is new and doesn't
 *	contain useful amounts of free space.
 *
 *	HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 *	relation, if the caller holds exclusive lock and is careful to invalidate
 *	relation->rd_targblock before the first insertion --- that ensures that
 *	all insertions will occur into newly added pages and not be intermixed
 *	with tuples from other transactions.  That way, a crash can't risk losing
 *	any committed data of other transactions.  (See heap_insert's comments
 *	for additional constraints needed for safe usage of this behavior.)
 *
 *	The caller can also provide a BulkInsertState object to optimize many
 *	insertions into the same relation.  This keeps a pin on the current
 *	insertion target page (to save pin/unpin cycles) and also passes a
 *	BULKWRITE buffer selection strategy object to the buffer manager.
 *	Passing NULL for bistate selects the default behavior.
 *
 *	We always try to avoid filling existing pages further than the fillfactor.
 *	This is OK since this routine is not consulted when updating a tuple and
 *	keeping it on the same page, which is the scenario fillfactor is meant
 *	to reserve space for.
 *
 *	ereport(ERROR) is allowed here, so this routine *must* be called
 *	before any (unlogged) changes are made in buffer pool.
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
						  Buffer otherBuffer, int options,
						  struct BulkInsertStateData *bistate)
{
	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
	Buffer		buffer = InvalidBuffer;
	Page		page;
	Size		pageFreeSpace,
				saveFreeSpace;
	BlockNumber targetBlock,
				otherBlock;
	bool		needLock;

	len = MAXALIGN(len);		/* be conservative */

	/* Bulk insert is not supported for updates, only inserts. */
	Assert(otherBuffer == InvalidBuffer || !bistate);

	/*
	 * If we're gonna fail for oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %lu, maximum size %lu",
						(unsigned long) len,
						(unsigned long) MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
												   HEAP_DEFAULT_FILLFACTOR);

	if (otherBuffer != InvalidBuffer)
		otherBlock = BufferGetBlockNumber(otherBuffer);
	else
		otherBlock = InvalidBlockNumber;	/* just to keep compiler quiet */

	/*
	 * We first try to put the tuple on the same page we last inserted a tuple
	 * on, as cached in the BulkInsertState or relcache entry.  If that
	 * doesn't work, we ask the Free Space Map to locate a suitable page.
	 * Since the FSM's info might be out of date, we have to be prepared to
	 * loop around and retry multiple times.  (To ensure this isn't an
	 * infinite loop, we must update the FSM with the correct amount of free
	 * space on each page that proves not to be suitable.)  If the FSM has no
	 * record of a page with enough free space, we give up and extend the
	 * relation.
	 *
	 * When use_fsm is false, we either put the tuple onto the existing target
	 * page or extend the relation.
	 */
	if (len + saveFreeSpace > MaxHeapTupleSize)
	{
		/* can't fit, don't bother asking FSM */
		targetBlock = InvalidBlockNumber;
		use_fsm = false;
	}
	else if (bistate && bistate->current_buf != InvalidBuffer)
		targetBlock = BufferGetBlockNumber(bistate->current_buf);
	else
		targetBlock = relation->rd_targblock;

	if (targetBlock == InvalidBlockNumber && use_fsm)
	{
		/*
		 * We have no cached target page, so ask the FSM for an initial
		 * target.
		 */
		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

		/*
		 * If the FSM knows nothing of the rel, try the last page before we
		 * give up and extend.  This avoids one-tuple-per-page syndrome during
		 * bootstrapping or in a recently-started system.
		 */
		if (targetBlock == InvalidBlockNumber)
		{
			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

			if (nblocks > 0)
				targetBlock = nblocks - 1;
		}
	}

	while (targetBlock != InvalidBlockNumber)
	{
		/*
		 * Read and exclusive-lock the target block, as well as the other
		 * block if one was given, taking suitable care with lock ordering and
		 * the possibility they are the same block.
		 */
		if (otherBuffer == InvalidBuffer)
		{
			/* easy case: no other buffer involved */
			buffer = ReadBufferBI(relation, targetBlock, bistate);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock == targetBlock)
		{
			/* also easy case: both are the same block */
			buffer = otherBuffer;
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock < targetBlock)
		{
			/* lock other buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else
		{
			/* lock target buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		}

		/*
		 * Now we can check to see if there's enough free space here.  If so,
		 * we're done.
		 */
		page = BufferGetPage(buffer);
		pageFreeSpace = PageGetHeapFreeSpace(page);
		if (len + saveFreeSpace <= pageFreeSpace)
		{
			/* use this page as future insert target, too */
			relation->rd_targblock = targetBlock;
			return buffer;
		}

		/*
		 * Not enough space, so we must give up our page locks and pin (if
		 * any) and prepare to look elsewhere.  We don't care which order we
		 * unlock the two buffers in, so this can be slightly simpler than the
		 * code above.
		 */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (otherBuffer == InvalidBuffer)
			ReleaseBuffer(buffer);
		else if (otherBlock != targetBlock)
		{
			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
		}

		/* Without FSM, always fall out of the loop and extend */
		if (!use_fsm)
			break;

		/*
		 * Update FSM as to condition of this page, and ask for another page
		 * to try.
		 */
		targetBlock = RecordAndGetPageWithFreeSpace(relation,
													targetBlock,
													pageFreeSpace,
													len + saveFreeSpace);
	}

	/*
	 * Have to extend the relation.
	 *
	 * We have to use a lock to ensure no one else is extending the rel at the
	 * same time, else we will both try to initialize the same new page.  We
	 * can skip locking for new or temp relations, however, since no one else
	 * could be accessing them.
	 */
	needLock = !RELATION_IS_LOCAL(relation);

	if (needLock)
		LockRelationForExtension(relation, ExclusiveLock);

	/*
	 * XXX This does an lseek - rather expensive - but at the moment it is the
	 * only way to accurately determine how many blocks are in a relation.  Is
	 * it worth keeping an accurate file length in shared memory someplace,
	 * rather than relying on the kernel to do it for us?
	 */
	buffer = ReadBufferBI(relation, P_NEW, bistate);

	/*
	 * We can be certain that locking the otherBuffer first is OK, since it
	 * must have a lower page number.
	 */
	if (otherBuffer != InvalidBuffer)
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Now acquire lock on the new page.
	 */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Release the file-extension lock; it's now OK for someone else to extend
	 * the relation some more.  Note that we cannot release this lock before
	 * we have buffer lock on the new page, or we risk a race condition
	 * against vacuumlazy.c --- see comments therein.
	 */
	if (needLock)
		UnlockRelationForExtension(relation, ExclusiveLock);

	/*
	 * We need to initialize the empty new page.  Double-check that it really
	 * is empty (this should never happen, but if it does we don't want to
	 * risk wiping out valid data).
	 */
	page = BufferGetPage(buffer);

	if (!PageIsNew(page))
		elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
			 BufferGetBlockNumber(buffer),
			 RelationGetRelationName(relation));

	PageInit(page, BufferGetPageSize(buffer), 0);

	if (len > PageGetHeapFreeSpace(page))
	{
		/* We should not get here given the test at the top */
		elog(PANIC, "tuple is too big: size %lu", (unsigned long) len);
	}

	/*
	 * Remember the new page as our target for future insertions.
	 *
	 * XXX should we enter the new page into the free space map immediately,
	 * or just keep it for this backend's exclusive use in the short run
	 * (until VACUUM sees it)?  Seems to depend on whether you expect the
	 * current backend to make more insertions or not, which is probably a
	 * good bet most of the time.  So for now, don't add it to FSM yet.
	 */
	relation->rd_targblock = BufferGetBlockNumber(buffer);

	return buffer;
}
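
/*
 * A sketch of the two-buffer heap_update case described in the header
 * comment above, assuming a caller shaped like heap_update in heapam.c:
 * "buffer" already pins the page holding the old tuple version, and a
 * second page is requested for the new version:
 *
 *		newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
 *										   buffer, 0, NULL);
 *
 * On return both buffers are exclusive-locked, having been acquired in
 * increasing block-number order so that concurrent updaters cannot deadlock
 * against each other.
 */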