src/backend/access/transam/varsup.c
/*-------------------------------------------------------------------------
 *
 * varsup.c
 *	  postgres OID & XID variables support routines
 *
 * Copyright (c) 2000-2009, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/clog.h"
#include "access/subtrans.h"
#include "access/transam.h"
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "utils/builtins.h"


/* Number of OIDs to prefetch (preallocate) per XLOG write */
#define VAR_OID_PREFETCH		8192

/* pointer to "variable cache" in shared memory (set up by shmem.c) */
VariableCache ShmemVariableCache = NULL;
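
/*
 * Note: the VariableCache struct itself is declared in access/transam.h.  The
 * fields touched in this file are nextXid, nextOid, oidCount, and the XID
 * wraparound limits (oldestXid, xidVacLimit, xidWarnLimit, xidStopLimit,
 * xidWrapLimit, limit_datname) maintained by SetTransactionIdLimit() below.
 */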

/*
 * Allocate the next XID for my new transaction or subtransaction.
 *
 * The new XID is also stored into MyProc before returning.
 */
TransactionId
GetNewTransactionId(bool isSubXact)
{
	TransactionId xid;

	/*
	 * During bootstrap initialization, we return the special bootstrap
	 * transaction id.
	 */
	if (IsBootstrapProcessingMode())
	{
		Assert(!isSubXact);
		MyProc->xid = BootstrapTransactionId;
		return BootstrapTransactionId;
	}

	LWLockAcquire(XidGenLock, LW_EXCLUSIVE);

	xid = ShmemVariableCache->nextXid;

	/*----------
	 * Check to see if it's safe to assign another XID.  This protects against
	 * catastrophic data loss due to XID wraparound.  The basic rules are:
	 *
	 * If we're past xidVacLimit, start trying to force autovacuum cycles.
	 * If we're past xidWarnLimit, start issuing warnings.
	 * If we're past xidStopLimit, refuse to execute transactions, unless
	 * we are running in a standalone backend (which gives an escape hatch
	 * to the DBA who somehow got past the earlier defenses).
	 *
	 * Test is coded to fall out as fast as possible during normal operation,
	 * ie, when the vac limit is set and we haven't violated it.
	 *----------
	 */
	if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidVacLimit) &&
		TransactionIdIsValid(ShmemVariableCache->xidVacLimit))
	{
		/*
		 * To avoid swamping the postmaster with signals, we issue the autovac
		 * request only once per 64K transaction starts.  This still gives
		 * plenty of chances before we get into real trouble.
		 */
		if (IsUnderPostmaster && (xid % 65536) == 0)
			SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);

		if (IsUnderPostmaster &&
			TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidStopLimit))
			ereport(ERROR,
					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
					 errmsg("database is not accepting commands to avoid wraparound data loss in database \"%s\"",
							NameStr(ShmemVariableCache->limit_datname)),
					 errhint("Stop the postmaster and use a standalone backend to vacuum database \"%s\".\n"
							 "You might also need to commit or roll back old prepared transactions.",
							 NameStr(ShmemVariableCache->limit_datname))));
		else if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit))
			ereport(WARNING,
					(errmsg("database \"%s\" must be vacuumed within %u transactions",
							NameStr(ShmemVariableCache->limit_datname),
							ShmemVariableCache->xidWrapLimit - xid),
					 errhint("To avoid a database shutdown, execute a database-wide VACUUM in \"%s\".\n"
							 "You might also need to commit or roll back old prepared transactions.",
							 NameStr(ShmemVariableCache->limit_datname))));
	}

	/*
	 * If we are allocating the first XID of a new page of the commit log,
	 * zero out that commit-log page before returning.  We must do this while
	 * holding XidGenLock, else another xact could acquire and commit a later
	 * XID before we zero the page.  Fortunately, a page of the commit log
	 * holds 32K or more transactions, so we don't have to do this very often.
	 *
	 * Extend pg_subtrans too.
	 */
	ExtendCLOG(xid);
	ExtendSUBTRANS(xid);

	/*
	 * Now advance the nextXid counter.  This must not happen until after we
	 * have successfully completed ExtendCLOG() --- if that routine fails, we
	 * want the next incoming transaction to try it again.  We cannot assign
	 * more XIDs until there is CLOG space for them.
	 */
	TransactionIdAdvance(ShmemVariableCache->nextXid);
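
	/*
	 * (TransactionIdAdvance() is a macro from access/transam.h; it increments
	 * the counter and, on wraparound, skips forward over the special XIDs so
	 * that nextXid is always a normal XID.)
	 */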

	/*
	 * We must store the new XID into the shared ProcArray before releasing
	 * XidGenLock.  This ensures that every active XID older than
	 * latestCompletedXid is present in the ProcArray, which is essential for
	 * correct OldestXmin tracking; see src/backend/access/transam/README.
	 *
	 * XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
	 * relying on fetch/store of an xid to be atomic, else other backends
	 * might see a partially-set xid here.  But holding both locks at once
	 * would be a nasty concurrency hit.  So for now, assume atomicity.
	 *
	 * Note that readers of PGPROC xid fields should be careful to fetch the
	 * value only once, rather than assume they can read a value multiple
	 * times and get the same answer each time.
	 *
	 * The same comments apply to the subxact xid count and overflow fields.
	 *
	 * A solution to the atomic-store problem would be to give each PGPROC its
	 * own spinlock used only for fetching/storing that PGPROC's xid and
	 * related fields.
	 *
	 * If there's no room to fit a subtransaction XID into PGPROC, set the
	 * cache-overflowed flag instead.  This forces readers to look in
	 * pg_subtrans to map subtransaction XIDs up to top-level XIDs.  There is a
	 * race-condition window, in that the new XID will not appear as running
	 * until its parent link has been placed into pg_subtrans.  However, that
	 * will happen before anyone could possibly have a reason to inquire about
	 * the status of the XID, so it seems OK.  (Snapshots taken during this
	 * window *will* include the parent XID, so they will deliver the correct
	 * answer later on when someone does have a reason to inquire.)
	 */
	{
		/*
		 * Use volatile pointer to prevent code rearrangement; other backends
		 * could be examining my subxids info concurrently, and we don't want
		 * them to see an invalid intermediate state, such as incrementing
		 * nxids before filling the array entry.  Note we are assuming that
		 * TransactionId and int fetch/store are atomic.
		 */
		volatile PGPROC *myproc = MyProc;

		if (!isSubXact)
			myproc->xid = xid;
		else
		{
			int			nxids = myproc->subxids.nxids;

			if (nxids < PGPROC_MAX_CACHED_SUBXIDS)
			{
				myproc->subxids.xids[nxids] = xid;
				myproc->subxids.nxids = nxids + 1;
			}
			else
				myproc->subxids.overflowed = true;
		}
	}

	LWLockRelease(XidGenLock);

	return xid;
}
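
/*
 * Illustrative sketch (not code from this file): per the atomicity note in
 * GetNewTransactionId() above, a reader of another backend's PGPROC should
 * fetch the xid exactly once into a local variable and use only that copy:
 *
 *		volatile PGPROC *proc = ...;
 *		TransactionId pxid = proc->xid;		-- fetch once, never re-read
 *
 *		if (TransactionIdIsValid(pxid))
 *			... work with pxid, do not re-fetch proc->xid ...
 */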

/*
 * Read nextXid but don't allocate it.
 */
TransactionId
ReadNewTransactionId(void)
{
	TransactionId xid;

	LWLockAcquire(XidGenLock, LW_SHARED);
	xid = ShmemVariableCache->nextXid;
	LWLockRelease(XidGenLock);

	return xid;
}

/*
 * Determine the last safe XID to allocate given the currently oldest
 * datfrozenxid (ie, the oldest XID that might exist in any database
 * of our cluster).
 */
void
SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
					  Name oldest_datname)
{
	TransactionId xidVacLimit;
	TransactionId xidWarnLimit;
	TransactionId xidStopLimit;
	TransactionId xidWrapLimit;
	TransactionId curXid;

	Assert(TransactionIdIsNormal(oldest_datfrozenxid));

	/*
	 * The place where we actually get into deep trouble is halfway around
	 * from the oldest potentially-existing XID.  (This calculation is
	 * probably off by one or two counts, because the special XIDs reduce the
	 * size of the loop a little bit.  But we throw in plenty of slop below,
	 * so it doesn't matter.)
	 */
	xidWrapLimit = oldest_datfrozenxid + (MaxTransactionId >> 1);
	if (xidWrapLimit < FirstNormalTransactionId)
		xidWrapLimit += FirstNormalTransactionId;
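
	/*
	 * Worked example (numbers purely illustrative): with oldest_datfrozenxid
	 * = 3000000000, adding MaxTransactionId >> 1 (2147483647) wraps modulo
	 * 2^32 to 852516351, which is already a normal XID, so no adjustment is
	 * needed.  Only if the sum landed on one of the special XIDs (0..2)
	 * would we bump it forward past them.
	 */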

	/*
	 * We'll refuse to continue assigning XIDs in interactive mode once we get
	 * within 1M transactions of data loss.  This leaves lots of room for the
	 * DBA to fool around fixing things in a standalone backend, while not
	 * being significant compared to total XID space.  (Note that since
	 * vacuuming requires one transaction per table cleaned, we had better be
	 * sure there's lots of XIDs left...)
	 */
	xidStopLimit = xidWrapLimit - 1000000;
	if (xidStopLimit < FirstNormalTransactionId)
		xidStopLimit -= FirstNormalTransactionId;
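
	/*
	 * (Note the adjustment just above is "-=", not "+=": this limit is
	 * derived by subtraction, so if it lands on one of the special XIDs we
	 * step backwards past them, keeping the limit the intended distance
	 * short of xidWrapLimit.  The same applies to xidWarnLimit below.)
	 */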

	/*
	 * We'll start complaining loudly when we get within 10M transactions of
	 * the stop point.  This is kind of arbitrary, but if you let your gas
	 * gauge get down to 1% of full, would you be looking for the next gas
	 * station?  We need to be fairly liberal about this number because there
	 * are lots of scenarios where most transactions are done by automatic
	 * clients that won't pay attention to warnings.  (No, we're not gonna make
	 * this configurable.  If you know enough to configure it, you know enough
	 * to not get in this kind of trouble in the first place.)
	 */
	xidWarnLimit = xidStopLimit - 10000000;
	if (xidWarnLimit < FirstNormalTransactionId)
		xidWarnLimit -= FirstNormalTransactionId;

	/*
	 * We'll start trying to force autovacuums when oldest_datfrozenxid gets
	 * to be more than autovacuum_freeze_max_age transactions old.
	 *
	 * Note: guc.c ensures that autovacuum_freeze_max_age is in a sane range,
	 * so that xidVacLimit will be well before xidWarnLimit.
	 *
	 * Note: autovacuum_freeze_max_age is a PGC_POSTMASTER parameter so that
	 * we don't have to worry about dealing with on-the-fly changes in its
	 * value.  It doesn't look practical to update shared state from a GUC
	 * assign hook (too many processes would try to execute the hook,
	 * resulting in race conditions as well as crashes of those not connected
	 * to shared memory).  Perhaps this can be improved someday.
	 */
	xidVacLimit = oldest_datfrozenxid + autovacuum_freeze_max_age;
	if (xidVacLimit < FirstNormalTransactionId)
		xidVacLimit += FirstNormalTransactionId;

	/* Grab lock for just long enough to set the new limit values */
	LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
	ShmemVariableCache->oldestXid = oldest_datfrozenxid;
	ShmemVariableCache->xidVacLimit = xidVacLimit;
	ShmemVariableCache->xidWarnLimit = xidWarnLimit;
	ShmemVariableCache->xidStopLimit = xidStopLimit;
	ShmemVariableCache->xidWrapLimit = xidWrapLimit;
	namecpy(&ShmemVariableCache->limit_datname, oldest_datname);
	curXid = ShmemVariableCache->nextXid;
	LWLockRelease(XidGenLock);

	/* Log the info */
	ereport(DEBUG1,
			(errmsg("transaction ID wrap limit is %u, limited by database \"%s\"",
					xidWrapLimit, NameStr(*oldest_datname))));

	/*
	 * If past the autovacuum force point, immediately signal an autovac
	 * request.  The reason for this is that autovac only processes one
	 * database per invocation.  Once it's finished cleaning up the oldest
	 * database, it'll call here, and we'll signal the postmaster to start
	 * another iteration immediately if there are still any old databases.
	 */
	if (TransactionIdFollowsOrEquals(curXid, xidVacLimit) &&
		IsUnderPostmaster)
		SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);

	/* Give an immediate warning if past the wrap warn point */
	if (TransactionIdFollowsOrEquals(curXid, xidWarnLimit))
		ereport(WARNING,
				(errmsg("database \"%s\" must be vacuumed within %u transactions",
						NameStr(*oldest_datname),
						xidWrapLimit - curXid),
				 errhint("To avoid a database shutdown, execute a database-wide VACUUM in \"%s\".\n"
						 "You might also need to commit or roll back old prepared transactions.",
						 NameStr(*oldest_datname))));
}

/*
 * GetNewObjectId -- allocate a new OID
 *
 * OIDs are generated by a cluster-wide counter.  Since they are only 32 bits
 * wide, counter wraparound will occur eventually, and therefore it is unwise
 * to assume they are unique unless precautions are taken to make them so.
 * Hence, this routine should generally not be used directly.  The only
 * direct callers should be GetNewOid() and GetNewRelFileNode() in
 * catalog/catalog.c.
 */
Oid
GetNewObjectId(void)
{
	Oid			result;

	LWLockAcquire(OidGenLock, LW_EXCLUSIVE);

	/*
	 * Check for wraparound of the OID counter.  We *must* not return 0
	 * (InvalidOid); and as long as we have to check that, it seems a good
	 * idea to skip over everything below FirstNormalObjectId too.  (This
	 * basically just avoids lots of collisions with bootstrap-assigned OIDs
	 * right after a wrap occurs, so as to avoid a possibly large number of
	 * iterations in GetNewOid.)  Note we are relying on unsigned comparison.
	 *
	 * During initdb, we start the OID generator at FirstBootstrapObjectId, so
	 * we only enforce wrapping to that point when in bootstrap or standalone
	 * mode.  The first time through this routine after normal postmaster
	 * start, the counter will be forced up to FirstNormalObjectId.  This
	 * mechanism leaves the OIDs between FirstBootstrapObjectId and
	 * FirstNormalObjectId available for automatic assignment during initdb,
	 * while ensuring they will never conflict with user-assigned OIDs.
	 */
	if (ShmemVariableCache->nextOid < ((Oid) FirstNormalObjectId))
	{
		if (IsPostmasterEnvironment)
		{
			/* wraparound in normal environment */
			ShmemVariableCache->nextOid = FirstNormalObjectId;
			ShmemVariableCache->oidCount = 0;
		}
		else
		{
			/* we may be bootstrapping, so don't enforce the full range */
			if (ShmemVariableCache->nextOid < ((Oid) FirstBootstrapObjectId))
			{
				/* wraparound in standalone environment? */
				ShmemVariableCache->nextOid = FirstBootstrapObjectId;
				ShmemVariableCache->oidCount = 0;
			}
		}
	}

	/* If we have run out of logged-for-use OIDs, then we must log more */
	if (ShmemVariableCache->oidCount == 0)
	{
		XLogPutNextOid(ShmemVariableCache->nextOid + VAR_OID_PREFETCH);
		ShmemVariableCache->oidCount = VAR_OID_PREFETCH;
	}

	result = ShmemVariableCache->nextOid;

	(ShmemVariableCache->nextOid)++;
	(ShmemVariableCache->oidCount)--;

	LWLockRelease(OidGenLock);

	return result;
}
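
/*
 * Illustrative sketch only (the real callers live in catalog/catalog.c):
 * because of OID wraparound, a value returned by GetNewObjectId() may
 * already be in use, so callers such as GetNewOid() retry until they find a
 * free one.  "oid_already_in_use" below is a hypothetical stand-in for the
 * caller's actual uniqueness check (an index scan on the target catalog).
 *
 *		Oid		newOid;
 *
 *		do
 *		{
 *			newOid = GetNewObjectId();
 *		} while (oid_already_in_use(newOid));
 */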