/*-------------------------------------------------------------------------
 *
 * bgwriter.c
 *
 * The background writer (bgwriter) is new as of Postgres 8.0.  It attempts
 * to keep regular backends from having to write out dirty shared buffers
 * (which they would only do when needing to free a shared buffer to read in
 * another page).  In the best scenario all writes from shared buffers will
 * be issued by the background writer process.  However, regular backends are
 * still empowered to issue writes if the bgwriter fails to maintain enough
 * clean shared buffers.
 *
 * The bgwriter is also charged with handling all checkpoints.  It will
 * automatically dispatch a checkpoint after a certain amount of time has
 * elapsed since the last one, and it can be signaled to perform requested
 * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
 * so many WAL segments is implemented by having backends signal the bgwriter
 * when they fill WAL segments; the bgwriter itself doesn't watch for the
 * condition.)
 *
 * The bgwriter is started by the postmaster as soon as the startup subprocess
 * finishes, or as soon as recovery begins if we are doing archive recovery.
 * It remains alive until the postmaster commands it to terminate.
 * Normal termination is by SIGUSR2, which instructs the bgwriter to execute
 * a shutdown checkpoint and then exit(0).  (All backends must be stopped
 * before SIGUSR2 is issued!)  Emergency termination is by SIGQUIT; like any
 * backend, the bgwriter will simply abort and exit on SIGQUIT.
 *
 * If the bgwriter exits unexpectedly, the postmaster treats that the same
 * as a backend crash: shared memory may be corrupted, so remaining backends
 * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
 * shared memory isn't corrupted, we have lost information about which
 * files need to be fsync'd for the next checkpoint, and so a system
 * restart needs to be forced.)
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "access/xlog_internal.h"
#include "catalog/pg_control.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgwriter.h"
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/pmsignal.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
/*----------
 * Shared memory area for communication between bgwriter and backends
 *
 * The ckpt counters allow backends to watch for completion of a checkpoint
 * request they send.  Here's how it works:
 *	* At start of a checkpoint, bgwriter reads (and clears) the request flags
 *	  and increments ckpt_started, while holding ckpt_lck.
 *	* On completion of a checkpoint, bgwriter sets ckpt_done to
 *	  equal ckpt_started.
 *	* On failure of a checkpoint, bgwriter increments ckpt_failed
 *	  and sets ckpt_done to equal ckpt_started.
 *
 * The algorithm for backends is:
 *	1. Record current values of ckpt_failed and ckpt_started, and
 *	   set request flags, while holding ckpt_lck.
 *	2. Send signal to request checkpoint.
 *	3. Sleep until ckpt_started changes.  Now you know a checkpoint has
 *	   begun since you started this algorithm (although *not* that it was
 *	   specifically initiated by your signal), and that it is using your flags.
 *	4. Record new value of ckpt_started.
 *	5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
 *	   arithmetic here in case counters wrap around.)  Now you know a
 *	   checkpoint has started and completed, but not whether it was
 *	   successful.
 *	6. If ckpt_failed is different from the originally saved value,
 *	   assume request failed; otherwise it was definitely successful.
 *
 * (An illustrative sketch of this handshake appears just after the struct
 * definitions below.)
 *
 * ckpt_flags holds the OR of the checkpoint request flags sent by all
 * requesting backends since the last checkpoint start.  The flags are
 * chosen so that OR'ing is the correct way to combine multiple requests.
 *
 * num_backend_writes is used to count the number of buffer writes performed
 * by non-bgwriter processes.  This counter should be wide enough that it
 * can't overflow during a single bgwriter cycle.
 *
 * The requests array holds fsync requests sent by backends and not yet
 * absorbed by the bgwriter.
 *
 * Unlike the checkpoint fields, num_backend_writes and the requests
 * fields are protected by BgWriterCommLock.
 *----------
 */
typedef struct
{
	RelFileNode rnode;
	ForkNumber	forknum;
	BlockNumber segno;			/* see md.c for special values */
	/* might add a real request-type field later; not needed yet */
} BgWriterRequest;

typedef struct
{
	pid_t		bgwriter_pid;	/* PID of bgwriter (0 if not started) */

	slock_t		ckpt_lck;		/* protects all the ckpt_* fields */

	int			ckpt_started;	/* advances when checkpoint starts */
	int			ckpt_done;		/* advances when checkpoint done */
	int			ckpt_failed;	/* advances when checkpoint fails */

	int			ckpt_flags;		/* checkpoint flags, as defined in xlog.h */

	uint32		num_backend_writes;		/* counts non-bgwriter buffer writes */

	int			num_requests;	/* current # of requests */
	int			max_requests;	/* allocated array size */
	BgWriterRequest requests[1];	/* VARIABLE LENGTH ARRAY */
} BgWriterShmemStruct;

static BgWriterShmemStruct *BgWriterShmem;
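/*
 * Illustrative sketch (not part of the original file): how a backend can
 * follow the request/completion handshake described above.  The real
 * implementation is RequestCheckpoint() below; this condensed version shows
 * only the counter protocol and assumes the signal to the bgwriter has
 * already been sent.
 */
#ifdef NOT_USED
static void
example_wait_for_checkpoint(volatile BgWriterShmemStruct *bgs, int my_flags)
{
	int			old_failed;
	int			old_started;
	int			new_started;
	int			new_done;
	int			new_failed;

	/* Step 1: snapshot the counters and publish our flags under the lock */
	SpinLockAcquire(&bgs->ckpt_lck);
	old_failed = bgs->ckpt_failed;
	old_started = bgs->ckpt_started;
	bgs->ckpt_flags |= my_flags;
	SpinLockRelease(&bgs->ckpt_lck);

	/* Step 2, signaling the bgwriter, is omitted here */

	/* Steps 3-4: wait for some checkpoint to begin after our request */
	do
	{
		pg_usleep(100000L);
		SpinLockAcquire(&bgs->ckpt_lck);
		new_started = bgs->ckpt_started;
		SpinLockRelease(&bgs->ckpt_lck);
	} while (new_started == old_started);

	/* Step 5: wait for that checkpoint to complete (modulo comparison) */
	do
	{
		pg_usleep(100000L);
		SpinLockAcquire(&bgs->ckpt_lck);
		new_done = bgs->ckpt_done;
		new_failed = bgs->ckpt_failed;
		SpinLockRelease(&bgs->ckpt_lck);
	} while (new_done - new_started < 0);

	/* Step 6: a changed failure counter means our request may have failed */
	if (new_failed != old_failed)
		elog(ERROR, "checkpoint request failed");
}
#endif   /* NOT_USED */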
/* interval for calling AbsorbFsyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB		1000

/*
 * GUC parameters
 */
int			BgWriterDelay = 200;
int			CheckPointTimeout = 300;
int			CheckPointWarning = 30;
double		CheckPointCompletionTarget = 0.5;
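/*
 * For reference (not part of the original file): these variables back the
 * postgresql.conf settings bgwriter_delay (milliseconds), checkpoint_timeout
 * and checkpoint_warning (seconds), and checkpoint_completion_target (a
 * fraction); the values above are simply the built-in defaults.
 */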
/*
 * Flags set by interrupt handlers for later service in the main loop.
 */
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t checkpoint_requested = false;
static volatile sig_atomic_t shutdown_requested = false;

/*
 * Private state
 */
static bool am_bg_writer = false;

static bool ckpt_active = false;

/* these values are valid when ckpt_active is true: */
static pg_time_t ckpt_start_time;
static XLogRecPtr ckpt_start_recptr;
static double ckpt_cached_elapsed;

static pg_time_t last_checkpoint_time;
static pg_time_t last_xlog_switch_time;
/* Prototypes for private functions */

static void CheckArchiveTimeout(void);
static void BgWriterNap(void);
static bool IsCheckpointOnSchedule(double progress);
static bool ImmediateCheckpointRequested(void);

/* Signal handlers */

static void bg_quickdie(SIGNAL_ARGS);
static void BgSigHupHandler(SIGNAL_ARGS);
static void ReqCheckpointHandler(SIGNAL_ARGS);
static void ReqShutdownHandler(SIGNAL_ARGS);
/*
 * Main entry point for bgwriter process
 *
 * This is invoked from BootstrapMain, which has already created the basic
 * execution environment, but not enabled signals yet.
 */
void
BackgroundWriterMain(void)
{
	sigjmp_buf	local_sigjmp_buf;
	MemoryContext bgwriter_context;

	BgWriterShmem->bgwriter_pid = MyProcPid;
	am_bg_writer = true;
	/*
	 * If possible, make this process a group leader, so that the postmaster
	 * can signal any child processes too.  (bgwriter probably never has any
	 * child processes, but for consistency we make all postmaster child
	 * processes do this.)
	 */
#ifdef HAVE_SETSID
	if (setsid() < 0)
		elog(FATAL, "setsid() failed: %m");
#endif
	/*
	 * Properly accept or ignore signals the postmaster might send us
	 *
	 * Note: we deliberately ignore SIGTERM, because during a standard Unix
	 * system shutdown cycle, init will SIGTERM all processes at once.  We
	 * want to wait for the backends to exit, whereupon the postmaster will
	 * tell us it's okay to shut down (via SIGUSR2).
	 *
	 * SIGUSR1 is presently unused; keep it spare in case someday we want this
	 * process to participate in sinval messaging.
	 */
	pqsignal(SIGHUP, BgSigHupHandler);	/* set flag to read config file */
	pqsignal(SIGINT, ReqCheckpointHandler);		/* request checkpoint */
	pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
	pqsignal(SIGQUIT, bg_quickdie);		/* hard crash time */
	pqsignal(SIGALRM, SIG_IGN);
	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, SIG_IGN); /* reserve for sinval */
	pqsignal(SIGUSR2, ReqShutdownHandler);		/* request shutdown */

	/*
	 * Reset some signals that are accepted by postmaster but not here
	 */
	pqsignal(SIGCHLD, SIG_DFL);
	pqsignal(SIGTTIN, SIG_DFL);
	pqsignal(SIGTTOU, SIG_DFL);
	pqsignal(SIGCONT, SIG_DFL);
	pqsignal(SIGWINCH, SIG_DFL);

	/* We allow SIGQUIT (quickdie) at all times */
#ifdef HAVE_SIGPROCMASK
	sigdelset(&BlockSig, SIGQUIT);
#else
	BlockSig &= ~(sigmask(SIGQUIT));
#endif
	/*
	 * Initialize so that first time-driven event happens at the correct time.
	 */
	last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);

	/*
	 * Create a resource owner to keep track of our resources (currently only
	 * buffer pins).
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer");
	/*
	 * Create a memory context that we will do all our work in.  We do this so
	 * that we can reset the context during error recovery and thereby avoid
	 * possible memory leaks.  Formerly this code just ran in
	 * TopMemoryContext, but resetting that would be a really bad idea.
	 */
	bgwriter_context = AllocSetContextCreate(TopMemoryContext,
											 "Background Writer",
											 ALLOCSET_DEFAULT_MINSIZE,
											 ALLOCSET_DEFAULT_INITSIZE,
											 ALLOCSET_DEFAULT_MAXSIZE);
	MemoryContextSwitchTo(bgwriter_context);
	/*
	 * If an exception is encountered, processing resumes here.
	 *
	 * See notes in postgres.c about the design of this coding.
	 */
	if (sigsetjmp(local_sigjmp_buf, 1) != 0)
	{
		/* Since not using PG_TRY, must reset error stack by hand */
		error_context_stack = NULL;

		/* Prevent interrupts while cleaning up */
		HOLD_INTERRUPTS();

		/* Report the error to the server log */
		EmitErrorReport();

		/*
		 * These operations are really just a minimal subset of
		 * AbortTransaction().  We don't have very many resources to worry
		 * about in bgwriter, but we do have LWLocks, buffers, and temp files.
		 */
		LWLockReleaseAll();
		AbortBufferIO();
		UnlockBuffers();
		/* buffer pins are released here: */
		ResourceOwnerRelease(CurrentResourceOwner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 false, true);
		/* we needn't bother with the other ResourceOwnerRelease phases */
		AtEOXact_Buffers(false);
		AtEOXact_Files();
		AtEOXact_HashTables(false);

		/* Warn any waiting backends that the checkpoint failed. */
		if (ckpt_active)
		{
			/* use volatile pointer to prevent code rearrangement */
			volatile BgWriterShmemStruct *bgs = BgWriterShmem;

			SpinLockAcquire(&bgs->ckpt_lck);
			bgs->ckpt_failed++;
			bgs->ckpt_done = bgs->ckpt_started;
			SpinLockRelease(&bgs->ckpt_lck);

			ckpt_active = false;
		}

		/*
		 * Now return to normal top-level context and clear ErrorContext for
		 * next time.
		 */
		MemoryContextSwitchTo(bgwriter_context);
		FlushErrorState();

		/* Flush any leaked data in the top-level context */
		MemoryContextResetAndDeleteChildren(bgwriter_context);

		/* Now we can allow interrupts again */
		RESUME_INTERRUPTS();

		/*
		 * Sleep at least 1 second after any error.  A write error is likely
		 * to be repeated, and we don't want to be filling the error logs as
		 * fast as we can.
		 */
		pg_usleep(1000000L);

		/*
		 * Close all open files after any error.  This is helpful on Windows,
		 * where holding deleted files open causes various strange errors.
		 * It's not clear we need it elsewhere, but shouldn't hurt.
		 */
		smgrcloseall();
	}

	/* We can now handle ereport(ERROR) */
	PG_exception_stack = &local_sigjmp_buf;

	/*
	 * Unblock signals (they were blocked when the postmaster forked us)
	 */
	PG_SETMASK(&UnBlockSig);
	/*
	 * Loop forever
	 */
	for (;;)
	{
		bool		do_checkpoint = false;
		int			flags = 0;
		pg_time_t	now;
		int			elapsed_secs;

		/*
		 * Emergency bailout if postmaster has died.  This is to avoid the
		 * necessity for manual cleanup of all postmaster children.
		 */
		if (!PostmasterIsAlive(true))
			exit(1);

		/*
		 * Process any requests or signals received recently.
		 */
		AbsorbFsyncRequests();

		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);
		}
		if (checkpoint_requested)
		{
			checkpoint_requested = false;
			do_checkpoint = true;
			BgWriterStats.m_requested_checkpoints++;
		}
		if (shutdown_requested)
		{
			/*
			 * From here on, elog(ERROR) should end with exit(1), not send
			 * control back to the sigsetjmp block above
			 */
			ExitOnAnyError = true;
			/* Close down the database */
			ShutdownXLOG(0, 0);
			/* Normal exit from the bgwriter is here */
			proc_exit(0);		/* done */
		}
		/*
		 * Force a checkpoint if too much time has elapsed since the last one.
		 * Note that we count a timed checkpoint in stats only when this
		 * occurs without an external request, but we set the CAUSE_TIME flag
		 * bit even if there is also an external request.
		 */
		now = (pg_time_t) time(NULL);
		elapsed_secs = now - last_checkpoint_time;
		if (elapsed_secs >= CheckPointTimeout)
		{
			if (!do_checkpoint)
				BgWriterStats.m_timed_checkpoints++;
			do_checkpoint = true;
			flags |= CHECKPOINT_CAUSE_TIME;
		}
		/*
		 * Do a checkpoint if requested, otherwise do one cycle of
		 * dirty-buffer writing.
		 */
		if (do_checkpoint)
		{
			bool		ckpt_performed = false;
			bool		do_restartpoint;

			/* use volatile pointer to prevent code rearrangement */
			volatile BgWriterShmemStruct *bgs = BgWriterShmem;

			/*
			 * Check if we should perform a checkpoint or a restartpoint.  As
			 * a side-effect, RecoveryInProgress() initializes TimeLineID if
			 * it's not set yet.
			 */
			do_restartpoint = RecoveryInProgress();

			/*
			 * Atomically fetch the request flags to figure out what kind of a
			 * checkpoint we should perform, and increase the started-counter
			 * to acknowledge that we've started a new checkpoint.
			 */
			SpinLockAcquire(&bgs->ckpt_lck);
			flags |= bgs->ckpt_flags;
			bgs->ckpt_flags = 0;
			bgs->ckpt_started++;
			SpinLockRelease(&bgs->ckpt_lck);

			/*
			 * The end-of-recovery checkpoint is a real checkpoint that's
			 * performed while we're still in recovery.
			 */
			if (flags & CHECKPOINT_END_OF_RECOVERY)
				do_restartpoint = false;

			/*
			 * We will warn if (a) it's too soon since the last checkpoint
			 * (whatever caused it) and (b) somebody set the
			 * CHECKPOINT_CAUSE_XLOG flag since the last checkpoint start.
			 * Note in particular that this implementation will not generate
			 * warnings caused by CheckPointTimeout < CheckPointWarning.
			 */
			if (!do_restartpoint &&
				(flags & CHECKPOINT_CAUSE_XLOG) &&
				elapsed_secs < CheckPointWarning)
				ereport(LOG,
						(errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
									   "checkpoints are occurring too frequently (%d seconds apart)",
									   elapsed_secs,
									   elapsed_secs),
						 errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));

			/*
			 * Initialize bgwriter-private variables used during checkpoint.
			 */
			ckpt_active = true;
			if (!do_restartpoint)
				ckpt_start_recptr = GetInsertRecPtr();
			ckpt_start_time = now;
			ckpt_cached_elapsed = 0;

			/*
			 * Do the checkpoint.
			 */
			if (!do_restartpoint)
			{
				CreateCheckPoint(flags);
				ckpt_performed = true;
			}
			else
				ckpt_performed = CreateRestartPoint(flags);

			/*
			 * After any checkpoint, close all smgr files.  This is so we
			 * won't hang onto smgr references to deleted files indefinitely.
			 */
			smgrcloseall();

			/*
			 * Indicate checkpoint completion to any waiting backends.
			 */
			SpinLockAcquire(&bgs->ckpt_lck);
			bgs->ckpt_done = bgs->ckpt_started;
			SpinLockRelease(&bgs->ckpt_lck);

			ckpt_active = false;

			if (ckpt_performed)
			{
				/*
				 * Note we record the checkpoint start time not end time as
				 * last_checkpoint_time.  This is so that time-driven
				 * checkpoints happen at a predictable spacing.
				 */
				last_checkpoint_time = now;
			}
			else
			{
				/*
				 * We were not able to perform the restartpoint (checkpoints
				 * throw an ERROR in case of error).  Most likely because we
				 * have not received any new checkpoint WAL records since the
				 * last restartpoint.  Try again in 15 s.
				 */
				last_checkpoint_time = now - CheckPointTimeout + 15;
			}
		}
		else
			BgBufferSync();

		/* Check for archive_timeout and switch xlog files if necessary. */
		CheckArchiveTimeout();

		/* Nap for the configured time. */
		BgWriterNap();
	}
}
/*
 * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
 *		if needed
 */
static void
CheckArchiveTimeout(void)
{
	pg_time_t	now;
	pg_time_t	last_time;

	if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
		return;

	now = (pg_time_t) time(NULL);

	/* First we do a quick check using possibly-stale local state. */
	if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
		return;

	/*
	 * Update local state ... note that last_xlog_switch_time is the last time
	 * a switch was performed *or requested*.
	 */
	last_time = GetLastSegSwitchTime();

	last_xlog_switch_time = Max(last_xlog_switch_time, last_time);

	/* Now we can do the real check */
	if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
	{
		XLogRecPtr	switchpoint;

		/* OK, it's time to switch */
		switchpoint = RequestXLogSwitch();

		/*
		 * If the returned pointer points exactly to a segment boundary,
		 * assume nothing happened.
		 */
		if ((switchpoint.xrecoff % XLogSegSize) != 0)
			ereport(DEBUG1,
					(errmsg("transaction log switch forced (archive_timeout=%d)",
							XLogArchiveTimeout)));

		/*
		 * Update state in any case, so we don't retry constantly when the
		 * system is idle.
		 */
		last_xlog_switch_time = now;
	}
}
/*
 * BgWriterNap -- Nap for the configured time or until a signal is received.
 */
static void
BgWriterNap(void)
{
	long		udelay;

	/*
	 * Send off activity statistics to the stats collector
	 */
	pgstat_send_bgwriter();

	/*
	 * Nap for the configured time, or sleep for 10 seconds if there is no
	 * bgwriter activity configured.
	 *
	 * On some platforms, signals won't interrupt the sleep.  To ensure we
	 * respond reasonably promptly when someone signals us, break down the
	 * sleep into 1-second increments, and check for interrupts after each
	 * nap.
	 *
	 * We absorb pending requests after each short sleep.
	 */
	if (bgwriter_lru_maxpages > 0 || ckpt_active)
		udelay = BgWriterDelay * 1000L;
	else if (XLogArchiveTimeout > 0)
		udelay = 1000000L;		/* One second */
	else
		udelay = 10000000L;		/* Ten seconds */

	while (udelay > 999999L)
	{
		if (got_SIGHUP || shutdown_requested ||
			(ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested))
			break;
		pg_usleep(1000000L);
		AbsorbFsyncRequests();
		udelay -= 1000000L;
	}

	if (!(got_SIGHUP || shutdown_requested ||
		  (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested)))
		pg_usleep(udelay);
}
/*
 * Returns true if an immediate checkpoint request is pending.  (Note that
 * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
 * there is one pending behind it.)
 */
static bool
ImmediateCheckpointRequested(void)
{
	if (checkpoint_requested)
	{
		volatile BgWriterShmemStruct *bgs = BgWriterShmem;

		/*
		 * We don't need to acquire the ckpt_lck in this case because we're
		 * only looking at a single flag bit.
		 */
		if (bgs->ckpt_flags & CHECKPOINT_IMMEDIATE)
			return true;
	}
	return false;
}
/*
 * CheckpointWriteDelay -- yield control to bgwriter during a checkpoint
 *
 * This function is called after each page write performed by BufferSync().
 * It is responsible for keeping the bgwriter's normal activities in
 * progress during a long checkpoint, and for throttling BufferSync()'s
 * write rate to hit checkpoint_completion_target.
 *
 * The checkpoint request flags should be passed in; currently the only one
 * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
 *
 * 'progress' is an estimate of how much of the work has been done, as a
 * fraction between 0.0 meaning none, and 1.0 meaning all done.
 */
void
CheckpointWriteDelay(int flags, double progress)
{
	static int	absorb_counter = WRITES_PER_ABSORB;

	/* Do nothing if checkpoint is being executed by non-bgwriter process */
	if (!am_bg_writer)
		return;

	/*
	 * Perform the usual bgwriter duties and take a nap, unless we're behind
	 * schedule, in which case we just try to catch up as quickly as possible.
	 */
	if (!(flags & CHECKPOINT_IMMEDIATE) &&
		!shutdown_requested &&
		!ImmediateCheckpointRequested() &&
		IsCheckpointOnSchedule(progress))
	{
		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);
		}

		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;

		CheckArchiveTimeout();
		BgWriterNap();
	}
	else if (--absorb_counter <= 0)
	{
		/*
		 * Absorb pending fsync requests after each WRITES_PER_ABSORB write
		 * operations even when we don't sleep, to prevent overflow of the
		 * fsync request queue.
		 */
		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;
	}
}
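/*
 * Illustrative sketch (not part of the original file): the rough shape of the
 * caller in BufferSync().  After each buffer it writes, the checkpointing
 * process reports its fractional progress so that CheckpointWriteDelay() can
 * throttle the write rate toward checkpoint_completion_target.  The variable
 * names below are placeholders, not the real bufmgr.c code.
 */
#ifdef NOT_USED
static void
example_checkpoint_write_loop(int flags, int num_to_write)
{
	int			num_written = 0;

	while (num_written < num_to_write)
	{
		/* ... write out one dirty buffer here ... */
		num_written++;

		/* yield to normal bgwriter duties and pace the remaining writes */
		CheckpointWriteDelay(flags, (double) num_written / num_to_write);
	}
}
#endif   /* NOT_USED */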
/*
 * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
 *		in time?
 *
 * Compares the current progress against the time/segments elapsed since last
 * checkpoint, and returns true if the progress we've made this far is greater
 * than the elapsed time/segments.
 */
static bool
IsCheckpointOnSchedule(double progress)
{
	XLogRecPtr	recptr;
	struct timeval now;
	double		elapsed_xlogs,
				elapsed_time;

	Assert(ckpt_active);

	/* Scale progress according to checkpoint_completion_target. */
	progress *= CheckPointCompletionTarget;

	/*
	 * Check against the cached value first. Only do the more expensive
	 * calculations once we reach the target previously calculated. Since
	 * neither time or WAL insert pointer moves backwards, a freshly
	 * calculated value can only be greater than or equal to the cached value.
	 */
	if (progress < ckpt_cached_elapsed)
		return false;

	/*
	 * Check progress against WAL segments written and checkpoint_segments.
	 *
	 * We compare the current WAL insert location against the location
	 * computed before calling CreateCheckPoint. The code in XLogInsert that
	 * actually triggers a checkpoint when checkpoint_segments is exceeded
	 * compares against RedoRecptr, so this is not completely accurate.
	 * However, it's good enough for our purposes, we're only calculating an
	 * estimate anyway.
	 */
	if (!RecoveryInProgress())
	{
		recptr = GetInsertRecPtr();
		elapsed_xlogs =
			(((double) (int32) (recptr.xlogid - ckpt_start_recptr.xlogid)) * XLogSegsPerFile +
			 ((double) recptr.xrecoff - (double) ckpt_start_recptr.xrecoff) / XLogSegSize) /
			CheckPointSegments;

		if (progress < elapsed_xlogs)
		{
			ckpt_cached_elapsed = elapsed_xlogs;
			return false;
		}
	}

	/*
	 * Check progress against time elapsed and checkpoint_timeout.
	 */
	gettimeofday(&now, NULL);
	elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
					now.tv_usec / 1000000.0) / CheckPointTimeout;

	if (progress < elapsed_time)
	{
		ckpt_cached_elapsed = elapsed_time;
		return false;
	}

	/* It looks like we're on schedule. */
	return true;
}
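/*
 * Worked example (not part of the original file): with checkpoint_timeout =
 * 300 s and checkpoint_completion_target = 0.5, a checkpoint that has written
 * 40% of its buffers has scaled progress 0.4 * 0.5 = 0.2, so it is considered
 * on schedule only while less than 0.2 * 300 s = 60 s have elapsed and,
 * analogously, while less than 20% of checkpoint_segments worth of new WAL
 * has been inserted since the checkpoint started.
 */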
/* --------------------------------
 *		signal handler routines
 * --------------------------------
 */

/*
 * bg_quickdie() occurs when signalled SIGQUIT by the postmaster.
 *
 * Some backend has bought the farm,
 * so we need to stop what we're doing and exit.
 */
static void
bg_quickdie(SIGNAL_ARGS)
{
	PG_SETMASK(&BlockSig);

	/*
	 * We DO NOT want to run proc_exit() callbacks -- we're here because
	 * shared memory may be corrupted, so we don't want to try to clean up our
	 * transaction.  Just nail the windows shut and get out of town.  Now that
	 * there's an atexit callback to prevent third-party code from breaking
	 * things by calling exit() directly, we have to reset the callbacks
	 * explicitly to make this work as intended.
	 */
	on_exit_reset();

	/*
	 * Note we do exit(2) not exit(0).  This is to force the postmaster into a
	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
	 * backend.  This is necessary precisely because we don't clean up our
	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
	 * should ensure the postmaster sees this as a crash, too, but no harm in
	 * being doubly sure.)
	 */
	exit(2);
}
/* SIGHUP: set flag to re-read config file at next convenient time */
static void
BgSigHupHandler(SIGNAL_ARGS)
{
	got_SIGHUP = true;
}

/* SIGINT: set flag to run a normal checkpoint right away */
static void
ReqCheckpointHandler(SIGNAL_ARGS)
{
	checkpoint_requested = true;
}

/* SIGUSR2: set flag to run a shutdown checkpoint and exit */
static void
ReqShutdownHandler(SIGNAL_ARGS)
{
	shutdown_requested = true;
}
/* --------------------------------
 *		communication with backends
 * --------------------------------
 */

/*
 * BgWriterShmemSize
 *		Compute space needed for bgwriter-related shared memory
 */
Size
BgWriterShmemSize(void)
{
	Size		size;

	/*
	 * Currently, the size of the requests[] array is arbitrarily set equal to
	 * NBuffers.  This may prove too large or small ...
	 */
	size = offsetof(BgWriterShmemStruct, requests);
	size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));

	return size;
}
/*
 * BgWriterShmemInit
 *		Allocate and initialize bgwriter-related shared memory
 */
void
BgWriterShmemInit(void)
{
	bool		found;

	BgWriterShmem = (BgWriterShmemStruct *)
		ShmemInitStruct("Background Writer Data",
						BgWriterShmemSize(),
						&found);
	if (BgWriterShmem == NULL)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("not enough shared memory for background writer")));
	if (found)
		return;					/* already initialized */

	MemSet(BgWriterShmem, 0, sizeof(BgWriterShmemStruct));
	SpinLockInit(&BgWriterShmem->ckpt_lck);
	BgWriterShmem->max_requests = NBuffers;
}
/*
 * RequestCheckpoint
 *		Called in backend processes to request a checkpoint
 *
 * flags is a bitwise OR of the following:
 *	CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
 *	CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
 *	CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
 *		ignoring checkpoint_completion_target parameter.
 *	CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
 *		since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
 *		CHECKPOINT_END_OF_RECOVERY).
 *	CHECKPOINT_WAIT: wait for completion before returning (otherwise,
 *		just signal bgwriter to do it, and return).
 *	CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
 *		(This affects logging, and in particular enables CheckPointWarning.)
 *
 * (A brief usage sketch appears after the function body below.)
 */
void
RequestCheckpoint(int flags)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile BgWriterShmemStruct *bgs = BgWriterShmem;
	int			ntries;
	int			old_failed,
				old_started;

	/*
	 * If in a standalone backend, just do it ourselves.
	 */
	if (!IsPostmasterEnvironment)
	{
		/*
		 * There's no point in doing slow checkpoints in a standalone backend,
		 * because there's no other backends the checkpoint could disrupt.
		 */
		CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);

		/*
		 * After any checkpoint, close all smgr files.  This is so we won't
		 * hang onto smgr references to deleted files indefinitely.
		 */
		smgrcloseall();

		return;
	}

	/*
	 * Atomically set the request flags, and take a snapshot of the counters.
	 * When we see ckpt_started > old_started, we know the flags we set here
	 * have been seen by bgwriter.
	 *
	 * Note that we OR the flags with any existing flags, to avoid overriding
	 * a "stronger" request by another backend.  The flag senses must be
	 * chosen to make this work!
	 */
	SpinLockAcquire(&bgs->ckpt_lck);

	old_failed = bgs->ckpt_failed;
	old_started = bgs->ckpt_started;
	bgs->ckpt_flags |= flags;

	SpinLockRelease(&bgs->ckpt_lck);

	/*
	 * Send signal to request checkpoint.  It's possible that the bgwriter
	 * hasn't started yet, or is in process of restarting, so we will retry a
	 * few times if needed.  Also, if not told to wait for the checkpoint to
	 * occur, we consider failure to send the signal to be nonfatal and merely
	 * LOG it.
	 */
	for (ntries = 0;; ntries++)
	{
		if (BgWriterShmem->bgwriter_pid == 0)
		{
			if (ntries >= 20)	/* max wait 2.0 sec */
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not request checkpoint because bgwriter not running");
				break;
			}
		}
		else if (kill(BgWriterShmem->bgwriter_pid, SIGINT) != 0)
		{
			if (ntries >= 20)	/* max wait 2.0 sec */
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not signal for checkpoint: %m");
				break;
			}
		}
		else
			break;				/* signal sent successfully */

		CHECK_FOR_INTERRUPTS();
		pg_usleep(100000L);		/* wait 0.1 sec, then retry */
	}

	/*
	 * If requested, wait for completion.  We detect completion according to
	 * the algorithm given above.
	 */
	if (flags & CHECKPOINT_WAIT)
	{
		int			new_started,
					new_failed;

		/* Wait for a new checkpoint to start. */
		for (;;)
		{
			SpinLockAcquire(&bgs->ckpt_lck);
			new_started = bgs->ckpt_started;
			SpinLockRelease(&bgs->ckpt_lck);

			if (new_started != old_started)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		/*
		 * We are waiting for ckpt_done >= new_started, in a modulo sense.
		 */
		for (;;)
		{
			int			new_done;

			SpinLockAcquire(&bgs->ckpt_lck);
			new_done = bgs->ckpt_done;
			new_failed = bgs->ckpt_failed;
			SpinLockRelease(&bgs->ckpt_lck);

			if (new_done - new_started >= 0)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		if (new_failed != old_failed)
			ereport(ERROR,
					(errmsg("checkpoint request failed"),
					 errhint("Consult recent messages in the server log for details.")));
	}
}
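/*
 * Usage sketch (not part of the original file): an explicit CHECKPOINT
 * command effectively asks for an immediate, forced checkpoint and waits for
 * it to finish, combining the flags roughly as shown below.
 */
#ifdef NOT_USED
static void
example_manual_checkpoint(void)
{
	RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
}
#endif   /* NOT_USED */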
/*
 * ForwardFsyncRequest
 *		Forward a file-fsync request from a backend to the bgwriter
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the bgwriter is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint.  We also use this
 * opportunity to count such writes for statistical purposes.
 *
 * segno specifies which segment (not block!) of the relation needs to be
 * fsync'd.  (Since the valid range is much less than BlockNumber, we can
 * use high values for special flags; that's all internal to md.c, which
 * see for details.)
 *
 * If we are unable to pass over the request (at present, this can happen
 * if the shared memory queue is full), we return false.  That forces
 * the backend to do its own fsync.  We hope that will be even more seldom.
 *
 * Note: we presently make no attempt to eliminate duplicate requests
 * in the requests[] queue.  The bgwriter will have to eliminate dups
 * internally anyway, so we may as well avoid holding the lock longer
 * than we have to here.
 *
 * (An illustrative caller sketch appears after the function body below.)
 */
bool
ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
	BgWriterRequest *request;

	if (!IsUnderPostmaster)
		return false;			/* probably shouldn't even get here */

	if (am_bg_writer)
		elog(ERROR, "ForwardFsyncRequest must not be called in bgwriter");

	LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);

	/* we count non-bgwriter writes even when the request queue overflows */
	BgWriterShmem->num_backend_writes++;

	if (BgWriterShmem->bgwriter_pid == 0 ||
		BgWriterShmem->num_requests >= BgWriterShmem->max_requests)
	{
		LWLockRelease(BgWriterCommLock);
		return false;
	}
	request = &BgWriterShmem->requests[BgWriterShmem->num_requests++];
	request->rnode = rnode;
	request->forknum = forknum;
	request->segno = segno;
	LWLockRelease(BgWriterCommLock);
	return true;
}
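/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * md.c is expected to use ForwardFsyncRequest(), falling back to a local
 * fsync when the request could not be queued (queue full, or no bgwriter).
 * The helper name and the direct pg_fsync() fallback are placeholders, not
 * the real md.c code.
 */
#ifdef NOT_USED
static void
example_register_dirty_segment(RelFileNode rnode, ForkNumber forknum,
							   BlockNumber segno, int fd)
{
	if (!ForwardFsyncRequest(rnode, forknum, segno))
	{
		/* queue is full (or bgwriter not running): do our own fsync */
		if (pg_fsync(fd) < 0)
			elog(ERROR, "could not fsync segment %u: %m", segno);
	}
}
#endif   /* NOT_USED */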
/*
 * AbsorbFsyncRequests
 *		Retrieve queued fsync requests and pass them to local smgr.
 *
 * This is exported because it must be called during CreateCheckPoint;
 * we have to be sure we have accepted all pending requests just before
 * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
 * non-bgwriter processes, do nothing if not bgwriter.
 */
void
AbsorbFsyncRequests(void)
{
	BgWriterRequest *requests = NULL;
	BgWriterRequest *request;
	int			n;

	if (!am_bg_writer)
		return;

	/*
	 * We have to PANIC if we fail to absorb all the pending requests (eg,
	 * because our hashtable runs out of memory).  This is because the system
	 * cannot run safely if we are unable to fsync what we have been told to
	 * fsync.  Fortunately, the hashtable is so small that the problem is
	 * quite unlikely to arise in practice.
	 */
	START_CRIT_SECTION();

	/*
	 * We try to avoid holding the lock for a long time by copying the request
	 * array.
	 */
	LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);

	/* Transfer write count into pending pgstats message */
	BgWriterStats.m_buf_written_backend += BgWriterShmem->num_backend_writes;
	BgWriterShmem->num_backend_writes = 0;

	n = BgWriterShmem->num_requests;
	if (n > 0)
	{
		requests = (BgWriterRequest *) palloc(n * sizeof(BgWriterRequest));
		memcpy(requests, BgWriterShmem->requests, n * sizeof(BgWriterRequest));
	}
	BgWriterShmem->num_requests = 0;

	LWLockRelease(BgWriterCommLock);

	for (request = requests; n > 0; request++, n--)
		RememberFsyncRequest(request->rnode, request->forknum, request->segno);

	if (requests)
		pfree(requests);

	END_CRIT_SECTION();
}