/*-------------------------------------------------------------------------
 *
 * parallel.c
 *    Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/brin.h"
#include "access/nbtree.h"
#include "access/parallel.h"
#include "access/session.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_enum.h"
#include "catalog/storage.h"
#include "commands/async.h"
#include "commands/vacuum.h"
#include "executor/execParallel.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "optimizer/optimizer.h"
#include "pgstat.h"
#include "storage/ipc.h"
#include "storage/predicate.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE			16384
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC						0x50477c7c
/*
 * Magic numbers for per-context parallel state sharing.  Higher-level code
 * should use smaller values, leaving these very large ones for use by this
 * module.
 */
#define PARALLEL_KEY_FIXED					UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE			UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY				UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC					UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID				UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT	UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT		UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE		UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_ENTRYPOINT				UINT64CONST(0xFFFFFFFFFFFF0009)
#define PARALLEL_KEY_SESSION_DSM			UINT64CONST(0xFFFFFFFFFFFF000A)
#define PARALLEL_KEY_PENDING_SYNCS			UINT64CONST(0xFFFFFFFFFFFF000B)
#define PARALLEL_KEY_REINDEX_STATE			UINT64CONST(0xFFFFFFFFFFFF000C)
#define PARALLEL_KEY_RELMAPPER_STATE		UINT64CONST(0xFFFFFFFFFFFF000D)
#define PARALLEL_KEY_UNCOMMITTEDENUMS		UINT64CONST(0xFFFFFFFFFFFF000E)
#define PARALLEL_KEY_CLIENTCONNINFO			UINT64CONST(0xFFFFFFFFFFFF000F)
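
/*
 * Editor's illustration (not part of the original file): higher-level callers
 * pick their own small key values for their per-context state, as the comment
 * above says.  A hypothetical caller might do something like the following;
 * MYCODE_KEY_STATE and MyCodeSharedState are invented names for this sketch.
 *
 *		#define MYCODE_KEY_STATE	UINT64CONST(1)
 *
 *		shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MyCodeSharedState));
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		state = shm_toc_allocate(pcxt->toc, sizeof(MyCodeSharedState));
 *		shm_toc_insert(pcxt->toc, MYCODE_KEY_STATE, state);
 *
 * and, in the worker's entry point:
 *
 *		state = shm_toc_lookup(toc, MYCODE_KEY_STATE, false);
 */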
/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
    /* Fixed-size state that workers must restore. */
    Oid         database_id;
    Oid         authenticated_user_id;
    Oid         session_user_id;
    Oid         outer_user_id;
    Oid         current_user_id;
    Oid         temp_namespace_id;
    Oid         temp_toast_namespace_id;
    int         sec_context;
    bool        session_user_is_superuser;
    bool        role_is_superuser;
    PGPROC     *parallel_leader_pgproc;
    pid_t       parallel_leader_pid;
    ProcNumber  parallel_leader_proc_number;
    TimestampTz xact_ts;
    TimestampTz stmt_ts;
    SerializableXactHandle serializable_xact_handle;

    /* Mutex protects remaining fields. */
    slock_t     mutex;

    /* Maximum XactLastRecEnd of any worker. */
    XLogRecPtr  last_xlog_end;
} FixedParallelState;
/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int         ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
volatile sig_atomic_t ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool        InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Backend-local copy of data from FixedParallelState. */
static pid_t ParallelLeaderPid;
static ProcNumber ParallelLeaderProcNumber;
/*
 * List of internal parallel worker entry points.  We need this for
 * reasons explained in LookupParallelWorkerFunction(), below.
 */
static const struct
{
    const char *fn_name;
    parallel_worker_main_type fn_addr;
}           InternalParallelWorkers[] =

{
    {
        "ParallelQueryMain", ParallelQueryMain
    },
    {
        "_bt_parallel_build_main", _bt_parallel_build_main
    },
    {
        "_brin_parallel_build_main", _brin_parallel_build_main
    },
    {
        "parallel_vacuum_main", parallel_vacuum_main
    }
};
/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname,
                                                              const char *funcname);
static void ParallelWorkerShutdown(int code, Datum arg);
/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(const char *library_name, const char *function_name,
                      int nworkers)
{
    MemoryContext oldcontext;
    ParallelContext *pcxt;

    /* It is unsafe to create a parallel context if not in parallel mode. */
    Assert(IsInParallelMode());

    /* Number of workers should be non-negative. */
    Assert(nworkers >= 0);

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Initialize a new ParallelContext. */
    pcxt = palloc0(sizeof(ParallelContext));
    pcxt->subid = GetCurrentSubTransactionId();
    pcxt->nworkers = nworkers;
    pcxt->nworkers_to_launch = nworkers;
    pcxt->library_name = pstrdup(library_name);
    pcxt->function_name = pstrdup(function_name);
    pcxt->error_context_stack = error_context_stack;
    shm_toc_initialize_estimator(&pcxt->estimator);
    dlist_push_head(&pcxt_list, &pcxt->node);

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);

    return pcxt;
}
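
/*
 * Editor's illustration (not part of the original file): a rough sketch of
 * the usual leader-side call sequence around the functions in this file.
 * The entry point shown is just an example; callers name whichever library
 * and function they want run in the workers.
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
 *		... caller adds its own shm_toc_estimate_chunk()/shm_toc_estimate_keys() ...
 *		InitializeParallelDSM(pcxt);
 *		... caller fills in its own TOC entries using pcxt->toc ...
 *		LaunchParallelWorkers(pcxt);
 *		... leader may do a share of the work itself ...
 *		WaitForParallelWorkersToFinish(pcxt);
 *		... read any results left behind in the DSM segment ...
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */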
/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy state and other bookkeeping information that will be needed by
 * parallel workers into it.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    Size        library_len = 0;
    Size        guc_len = 0;
    Size        combocidlen = 0;
    Size        tsnaplen = 0;
    Size        asnaplen = 0;
    Size        tstatelen = 0;
    Size        pendingsyncslen = 0;
    Size        reindexlen = 0;
    Size        relmapperlen = 0;
    Size        uncommittedenumslen = 0;
    Size        clientconninfolen = 0;
    Size        segsize = 0;
    int         i;
    FixedParallelState *fps;
    dsm_handle  session_dsm_handle = DSM_HANDLE_INVALID;
    Snapshot    transaction_snapshot = GetTransactionSnapshot();
    Snapshot    active_snapshot = GetActiveSnapshot();

    /* We might be running in a very short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Allow space to store the fixed-size parallel state. */
    shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
    shm_toc_estimate_keys(&pcxt->estimator, 1);

    /*
     * If we manage to reach here while non-interruptible, it's unsafe to
     * launch any workers: we would fail to process interrupts sent by them.
     * We can deal with that edge case by pretending no workers were
     * requested.
     */
    if (!INTERRUPTS_CAN_BE_PROCESSED())
        pcxt->nworkers = 0;

    /*
     * Normally, the user will have requested at least one worker process, but
     * if by chance they have not, we can skip a bunch of things here.
     */
    if (pcxt->nworkers > 0)
    {
        /* Get (or create) the per-session DSM segment's handle. */
        session_dsm_handle = GetSessionDsmHandle();

        /*
         * If we weren't able to create a per-session DSM segment, then we can
         * continue but we can't safely launch any workers because their
         * record typmods would be incompatible so they couldn't exchange
         * tuples.
         */
        if (session_dsm_handle == DSM_HANDLE_INVALID)
            pcxt->nworkers = 0;
    }

    if (pcxt->nworkers > 0)
    {
        /* Estimate space for various kinds of state sharing. */
        library_len = EstimateLibraryStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, library_len);
        guc_len = EstimateGUCStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
        combocidlen = EstimateComboCIDStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
        if (IsolationUsesXactSnapshot())
        {
            tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
            shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
        }
        asnaplen = EstimateSnapshotSpace(active_snapshot);
        shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
        tstatelen = EstimateTransactionStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
        shm_toc_estimate_chunk(&pcxt->estimator, sizeof(dsm_handle));
        pendingsyncslen = EstimatePendingSyncsSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, pendingsyncslen);
        reindexlen = EstimateReindexStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, reindexlen);
        relmapperlen = EstimateRelationMapSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, relmapperlen);
        uncommittedenumslen = EstimateUncommittedEnumsSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, uncommittedenumslen);
        clientconninfolen = EstimateClientConnectionInfoSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, clientconninfolen);
        /* If you add more chunks here, you probably need to add keys. */
        shm_toc_estimate_keys(&pcxt->estimator, 12);

        /* Estimate space need for error queues. */
        StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
                         PARALLEL_ERROR_QUEUE_SIZE,
                         "parallel error queue size not buffer-aligned");
        shm_toc_estimate_chunk(&pcxt->estimator,
                               mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                        pcxt->nworkers));
        shm_toc_estimate_keys(&pcxt->estimator, 1);

        /* Estimate how much we'll need for the entrypoint info. */
        shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name) +
                               strlen(pcxt->function_name) + 2);
        shm_toc_estimate_keys(&pcxt->estimator, 1);
    }

    /*
     * Create DSM and initialize with new table of contents.  But if the user
     * didn't request any workers, then don't bother creating a dynamic shared
     * memory segment; instead, just use backend-private memory.
     *
     * Also, if we can't create a dynamic shared memory segment because the
     * maximum number of segments have already been created, then fall back to
     * backend-private memory, and plan not to use any workers.  We hope this
     * won't happen very often, but it's better to abandon the use of
     * parallelism than to fail outright.
     */
    segsize = shm_toc_estimate(&pcxt->estimator);
    if (pcxt->nworkers > 0)
        pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
    if (pcxt->seg != NULL)
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
                                   dsm_segment_address(pcxt->seg),
                                   segsize);
    else
    {
        pcxt->nworkers = 0;
        pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
                                   segsize);
    }

    /* Initialize fixed-size state in shared memory. */
    fps = (FixedParallelState *)
        shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
    fps->database_id = MyDatabaseId;
    fps->authenticated_user_id = GetAuthenticatedUserId();
    fps->session_user_id = GetSessionUserId();
    fps->outer_user_id = GetCurrentRoleId();
    GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
    fps->session_user_is_superuser = GetSessionUserIsSuperuser();
    fps->role_is_superuser = current_role_is_superuser;
    GetTempNamespaceState(&fps->temp_namespace_id,
                          &fps->temp_toast_namespace_id);
    fps->parallel_leader_pgproc = MyProc;
    fps->parallel_leader_pid = MyProcPid;
    fps->parallel_leader_proc_number = MyProcNumber;
    fps->xact_ts = GetCurrentTransactionStartTimestamp();
    fps->stmt_ts = GetCurrentStatementStartTimestamp();
    fps->serializable_xact_handle = ShareSerializableXact();
    SpinLockInit(&fps->mutex);
    fps->last_xlog_end = 0;
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

    /* We can skip the rest of this if we're not budgeting for any workers. */
    if (pcxt->nworkers > 0)
    {
        char       *libraryspace;
        char       *gucspace;
        char       *combocidspace;
        char       *tsnapspace;
        char       *asnapspace;
        char       *tstatespace;
        char       *pendingsyncsspace;
        char       *reindexspace;
        char       *relmapperspace;
        char       *error_queue_space;
        char       *session_dsm_handle_space;
        char       *entrypointstate;
        char       *uncommittedenumsspace;
        char       *clientconninfospace;
        Size        lnamelen;

        /* Serialize shared libraries we have loaded. */
        libraryspace = shm_toc_allocate(pcxt->toc, library_len);
        SerializeLibraryState(library_len, libraryspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

        /* Serialize GUC settings. */
        gucspace = shm_toc_allocate(pcxt->toc, guc_len);
        SerializeGUCState(guc_len, gucspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

        /* Serialize combo CID state. */
        combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
        SerializeComboCIDState(combocidlen, combocidspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

        /*
         * Serialize the transaction snapshot if the transaction isolation
         * level uses a transaction snapshot.
         */
        if (IsolationUsesXactSnapshot())
        {
            tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
            SerializeSnapshot(transaction_snapshot, tsnapspace);
            shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
                           tsnapspace);
        }

        /* Serialize the active snapshot. */
        asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
        SerializeSnapshot(active_snapshot, asnapspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

        /* Provide the handle for per-session segment. */
        session_dsm_handle_space = shm_toc_allocate(pcxt->toc,
                                                    sizeof(dsm_handle));
        *(dsm_handle *) session_dsm_handle_space = session_dsm_handle;
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_SESSION_DSM,
                       session_dsm_handle_space);

        /* Serialize transaction state. */
        tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
        SerializeTransactionState(tstatelen, tstatespace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

        /* Serialize pending syncs. */
        pendingsyncsspace = shm_toc_allocate(pcxt->toc, pendingsyncslen);
        SerializePendingSyncs(pendingsyncslen, pendingsyncsspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_PENDING_SYNCS,
                       pendingsyncsspace);

        /* Serialize reindex state. */
        reindexspace = shm_toc_allocate(pcxt->toc, reindexlen);
        SerializeReindexState(reindexlen, reindexspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace);

        /* Serialize relmapper state. */
        relmapperspace = shm_toc_allocate(pcxt->toc, relmapperlen);
        SerializeRelationMap(relmapperlen, relmapperspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_RELMAPPER_STATE,
                       relmapperspace);

        /* Serialize uncommitted enum state. */
        uncommittedenumsspace = shm_toc_allocate(pcxt->toc,
                                                 uncommittedenumslen);
        SerializeUncommittedEnums(uncommittedenumsspace, uncommittedenumslen);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
                       uncommittedenumsspace);

        /* Serialize our ClientConnectionInfo. */
        clientconninfospace = shm_toc_allocate(pcxt->toc, clientconninfolen);
        SerializeClientConnectionInfo(clientconninfolen, clientconninfospace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_CLIENTCONNINFO,
                       clientconninfospace);

        /* Allocate space for worker information. */
        pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

        /*
         * Establish error queues in dynamic shared memory.
         *
         * These queues should be used only for transmitting ErrorResponse,
         * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
         * should be transmitted via separate (possibly larger?) queues.
         */
        error_queue_space =
            shm_toc_allocate(pcxt->toc,
                             mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                      pcxt->nworkers));
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            char       *start;
            shm_mq     *mq;

            start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
            mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
            shm_mq_set_receiver(mq, MyProc);
            pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

        /*
         * Serialize entrypoint information.  It's unsafe to pass function
         * pointers across processes, as the function pointer may be different
         * in each process in EXEC_BACKEND builds, so we always pass library
         * and function name.  (We use library name "postgres" for functions
         * in the core backend.)
         */
        lnamelen = strlen(pcxt->library_name);
        entrypointstate = shm_toc_allocate(pcxt->toc, lnamelen +
                                           strlen(pcxt->function_name) + 2);
        strcpy(entrypointstate, pcxt->library_name);
        strcpy(entrypointstate + lnamelen + 1, pcxt->function_name);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
    }

    /* Update nworkers_to_launch, in case we changed nworkers above. */
    pcxt->nworkers_to_launch = pcxt->nworkers;

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}
/*
 * Reinitialize the dynamic shared memory segment for a parallel context such
 * that we could launch workers for it again.
 */
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
    FixedParallelState *fps;

    /* Wait for any old workers to exit. */
    if (pcxt->nworkers_launched > 0)
    {
        WaitForParallelWorkersToFinish(pcxt);
        WaitForParallelWorkersToExit(pcxt);
        pcxt->nworkers_launched = 0;
        if (pcxt->known_attached_workers)
        {
            pfree(pcxt->known_attached_workers);
            pcxt->known_attached_workers = NULL;
            pcxt->nknown_attached_workers = 0;
        }
    }

    /* Reset a few bits of fixed parallel state to a clean state. */
    fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
    fps->last_xlog_end = 0;

    /* Recreate error queues (if they exist). */
    if (pcxt->nworkers > 0)
    {
        char       *error_queue_space;
        int         i;

        error_queue_space =
            shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false);
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            char       *start;
            shm_mq     *mq;

            start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
            mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
            shm_mq_set_receiver(mq, MyProc);
            pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }
    }
}
/*
 * Reinitialize parallel workers for a parallel context such that we could
 * launch a different number of workers.  This is required for cases where
 * we need to reuse the same DSM segment, but the number of workers can
 * vary from run-to-run.
 */
void
ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
{
    /*
     * The number of workers that need to be launched must be less than the
     * number of workers with which the parallel context is initialized.  But
     * the caller might not know that InitializeParallelDSM reduced nworkers,
     * so just silently trim the request.
     */
    pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
}
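
/*
 * Editor's illustration (not part of the original file): a caller that wants
 * to run the same parallel operation more than once on the same DSM segment
 * would typically loop roughly like this, optionally trimming the worker
 * count on later passes; npasses and nworkers_this_pass are invented names.
 *
 *		for (pass = 0; pass < npasses; pass++)
 *		{
 *			if (pass > 0)
 *			{
 *				ReinitializeParallelDSM(pcxt);
 *				ReinitializeParallelWorkers(pcxt, nworkers_this_pass);
 *			}
 *			LaunchParallelWorkers(pcxt);
 *			... leader-side work for this pass ...
 *			WaitForParallelWorkersToFinish(pcxt);
 *		}
 */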
/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    BackgroundWorker worker;
    int         i;
    bool        any_registrations_failed = false;

    /* Skip this if we have no workers. */
    if (pcxt->nworkers == 0 || pcxt->nworkers_to_launch == 0)
        return;

    /* We need to be a lock group leader. */
    BecomeLockGroupLeader();

    /* If we do have workers, we'd better have a DSM segment. */
    Assert(pcxt->seg != NULL);

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Configure a worker. */
    memset(&worker, 0, sizeof(worker));
    snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
             MyProcPid);
    snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker");
    worker.bgw_flags =
        BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
        | BGWORKER_CLASS_PARALLEL;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    sprintf(worker.bgw_library_name, "postgres");
    sprintf(worker.bgw_function_name, "ParallelWorkerMain");
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
    worker.bgw_notify_pid = MyProcPid;

    /*
     * Start workers.
     *
     * The caller must be able to tolerate ending up with fewer workers than
     * expected, so there is no need to throw an error here if registration
     * fails.  It wouldn't help much anyway, because registering the worker in
     * no way guarantees that it will start up and initialize successfully.
     */
    for (i = 0; i < pcxt->nworkers_to_launch; ++i)
    {
        memcpy(worker.bgw_extra, &i, sizeof(int));
        if (!any_registrations_failed &&
            RegisterDynamicBackgroundWorker(&worker,
                                            &pcxt->worker[i].bgwhandle))
        {
            shm_mq_set_handle(pcxt->worker[i].error_mqh,
                              pcxt->worker[i].bgwhandle);
            pcxt->nworkers_launched++;
        }
        else
        {
            /*
             * If we weren't able to register the worker, then we've bumped up
             * against the max_worker_processes limit, and future
             * registrations will probably fail too, so arrange to skip them.
             * But we still have to execute this code for the remaining slots
             * to make sure that we forget about the error queues we budgeted
             * for those workers.  Otherwise, we'll wait for them to start,
             * but they never will.
             */
            any_registrations_failed = true;
            pcxt->worker[i].bgwhandle = NULL;
            shm_mq_detach(pcxt->worker[i].error_mqh);
            pcxt->worker[i].error_mqh = NULL;
        }
    }

    /*
     * Now that nworkers_launched has taken its final value, we can initialize
     * known_attached_workers.
     */
    if (pcxt->nworkers_launched > 0)
    {
        pcxt->known_attached_workers =
            palloc0(sizeof(bool) * pcxt->nworkers_launched);
        pcxt->nknown_attached_workers = 0;
    }

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}
/*
 * Wait for all workers to attach to their error queues, and throw an error if
 * any worker fails to do this.
 *
 * Callers can assume that if this function returns successfully, then the
 * number of workers given by pcxt->nworkers_launched have initialized and
 * attached to their error queues.  Whether or not these workers are guaranteed
 * to still be running depends on what code the caller asked them to run;
 * this function does not guarantee that they have not exited.  However, it
 * does guarantee that any workers which exited must have done so cleanly and
 * after successfully performing the work with which they were tasked.
 *
 * If this function is not called, then some of the workers that were launched
 * may not have been started due to a fork() failure, or may have exited during
 * early startup prior to attaching to the error queue, so nworkers_launched
 * cannot be viewed as completely reliable.  It will never be less than the
 * number of workers which actually started, but it might be more.  Any workers
 * that failed to start will still be discovered by
 * WaitForParallelWorkersToFinish and an error will be thrown at that time,
 * provided that function is eventually reached.
 *
 * In general, the leader process should do as much work as possible before
 * calling this function.  fork() failures and other early-startup failures
 * are very uncommon, and having the leader sit idle when it could be doing
 * useful work is undesirable.  However, if the leader needs to wait for
 * all of its workers or for a specific worker, it may want to call this
 * function before doing so.  If not, it must make some other provision for
 * the failure-to-start case, lest it wait forever.  On the other hand, a
 * leader which never waits for a worker that might not be started yet, or
 * at least never does so prior to WaitForParallelWorkersToFinish(), need not
 * call this function at all.
 */
void
WaitForParallelWorkersToAttach(ParallelContext *pcxt)
{
    int         i;

    /* Skip this if we have no launched workers. */
    if (pcxt->nworkers_launched == 0)
        return;

    for (;;)
    {
        /*
         * This will process any parallel messages that are pending and it may
         * also throw an error propagated from a worker.
         */
        CHECK_FOR_INTERRUPTS();

        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            BgwHandleStatus status;
            shm_mq     *mq;
            int         rc;
            pid_t       pid;

            if (pcxt->known_attached_workers[i])
                continue;

            /*
             * If error_mqh is NULL, then the worker has already exited
             * cleanly.
             */
            if (pcxt->worker[i].error_mqh == NULL)
            {
                pcxt->known_attached_workers[i] = true;
                ++pcxt->nknown_attached_workers;
                continue;
            }

            status = GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, &pid);
            if (status == BGWH_STARTED)
            {
                /* Has the worker attached to the error queue? */
                mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
                if (shm_mq_get_sender(mq) != NULL)
                {
                    /* Yes, so it is known to be attached. */
                    pcxt->known_attached_workers[i] = true;
                    ++pcxt->nknown_attached_workers;
                }
            }
            else if (status == BGWH_STOPPED)
            {
                /*
                 * If the worker stopped without attaching to the error queue,
                 * throw an error.
                 */
                mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
                if (shm_mq_get_sender(mq) == NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                             errmsg("parallel worker failed to initialize"),
                             errhint("More details may be available in the server log.")));

                pcxt->known_attached_workers[i] = true;
                ++pcxt->nknown_attached_workers;
            }
            else
            {
                /*
                 * Worker not yet started, so we must wait.  The postmaster
                 * will notify us if the worker's state changes.  Our latch
                 * might also get set for some other reason, but if so we'll
                 * just end up waiting for the same worker again.
                 */
                rc = WaitLatch(MyLatch,
                               WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
                               -1, WAIT_EVENT_BGWORKER_STARTUP);

                if (rc & WL_LATCH_SET)
                    ResetLatch(MyLatch);
            }
        }

        /* If all workers are known to have started, we're done. */
        if (pcxt->nknown_attached_workers >= pcxt->nworkers_launched)
        {
            Assert(pcxt->nknown_attached_workers == pcxt->nworkers_launched);
            break;
        }
    }
}
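
/*
 * Editor's illustration (not part of the original file): per the contract
 * described above, a leader that must block on a particular worker (say, on
 * a queue or condition variable that only that worker will ever signal)
 * should first confirm that its workers actually started, e.g.:
 *
 *		LaunchParallelWorkers(pcxt);
 *		... do leader-side work that doesn't depend on any worker ...
 *		WaitForParallelWorkersToAttach(pcxt);
 *		... now it is safe to wait for a specific worker ...
 */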
/*
 * Wait for all workers to finish computing.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
    for (;;)
    {
        bool        anyone_alive = false;
        int         nfinished = 0;
        int         i;

        /*
         * This will process any parallel messages that are pending, which may
         * change the outcome of the loop that follows.  It may also throw an
         * error propagated from a worker.
         */
        CHECK_FOR_INTERRUPTS();

        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            /*
             * If error_mqh is NULL, then the worker has already exited
             * cleanly.  If we have received a message through error_mqh from
             * the worker, we know it started up cleanly, and therefore we're
             * certain to be notified when it exits.
             */
            if (pcxt->worker[i].error_mqh == NULL)
                ++nfinished;
            else if (pcxt->known_attached_workers[i])
            {
                anyone_alive = true;
                break;
            }
        }

        if (!anyone_alive)
        {
            /* If all workers are known to have finished, we're done. */
            if (nfinished >= pcxt->nworkers_launched)
            {
                Assert(nfinished == pcxt->nworkers_launched);
                break;
            }

            /*
             * We didn't detect any living workers, but not all workers are
             * known to have exited cleanly.  Either not all workers have
             * launched yet, or maybe some of them failed to start or
             * terminated abnormally.
             */
            for (i = 0; i < pcxt->nworkers_launched; ++i)
            {
                pid_t       pid;
                shm_mq     *mq;

                /*
                 * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we
                 * should just keep waiting.  If it is BGWH_STOPPED, then
                 * further investigation is needed.
                 */
                if (pcxt->worker[i].error_mqh == NULL ||
                    pcxt->worker[i].bgwhandle == NULL ||
                    GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle,
                                           &pid) != BGWH_STOPPED)
                    continue;

                /*
                 * Check whether the worker ended up stopped without ever
                 * attaching to the error queue.  If so, the postmaster was
                 * unable to fork the worker or it exited without initializing
                 * properly.  We must throw an error, since the caller may
                 * have been expecting the worker to do some work before
                 * exiting.
                 */
                mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
                if (shm_mq_get_sender(mq) == NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                             errmsg("parallel worker failed to initialize"),
                             errhint("More details may be available in the server log.")));

                /*
                 * The worker is stopped, but is attached to the error queue.
                 * Unless there's a bug somewhere, this will only happen when
                 * the worker writes messages and terminates after the
                 * CHECK_FOR_INTERRUPTS() near the top of this function and
                 * before the call to GetBackgroundWorkerPid().  In that case,
                 * our latch should have been set as well and the right things
                 * will happen on the next pass through the loop.
                 */
            }
        }

        (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
                         WAIT_EVENT_PARALLEL_FINISH);
        ResetLatch(MyLatch);
    }

    if (pcxt->toc != NULL)
    {
        FixedParallelState *fps;

        fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
        if (fps->last_xlog_end > XactLastRecEnd)
            XactLastRecEnd = fps->last_xlog_end;
    }
}
/*
 * Wait for all workers to exit.
 *
 * This function ensures that workers have been completely shutdown.  The
 * difference between WaitForParallelWorkersToFinish and this function is
 * that the former just ensures that the last message sent by a worker backend
 * is received by the leader backend whereas this ensures the complete shutdown.
 */
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
    int         i;

    /* Wait until the workers actually die. */
    for (i = 0; i < pcxt->nworkers_launched; ++i)
    {
        BgwHandleStatus status;

        if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
            continue;

        status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);

        /*
         * If the postmaster kicked the bucket, we have no chance of cleaning
         * up safely -- we won't be able to tell when our workers are actually
         * dead.  This doesn't necessitate a PANIC since they will all abort
         * eventually, but we can't safely continue this session.
         */
        if (status == BGWH_POSTMASTER_DIED)
            ereport(FATAL,
                    (errcode(ERRCODE_ADMIN_SHUTDOWN),
                     errmsg("postmaster exited during a parallel transaction")));

        /* Release memory. */
        pfree(pcxt->worker[i].bgwhandle);
        pcxt->worker[i].bgwhandle = NULL;
    }
}
/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
    int         i;

    /*
     * Be careful about order of operations here!  We remove the parallel
     * context from the list before we do anything else; otherwise, if an
     * error occurs during a subsequent step, we might try to nuke it again
     * from AtEOXact_Parallel or AtEOSubXact_Parallel.
     */
    dlist_delete(&pcxt->node);

    /* Kill each worker in turn, and forget their error queues. */
    if (pcxt->worker != NULL)
    {
        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            if (pcxt->worker[i].error_mqh != NULL)
            {
                TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);

                shm_mq_detach(pcxt->worker[i].error_mqh);
                pcxt->worker[i].error_mqh = NULL;
            }
        }
    }

    /*
     * If we have allocated a shared memory segment, detach it.  This will
     * implicitly detach the error queues, and any other shared memory queues,
     * stored there.
     */
    if (pcxt->seg != NULL)
    {
        dsm_detach(pcxt->seg);
        pcxt->seg = NULL;
    }

    /*
     * If this parallel context is actually in backend-private memory rather
     * than shared memory, free that memory instead.
     */
    if (pcxt->private_memory != NULL)
    {
        pfree(pcxt->private_memory);
        pcxt->private_memory = NULL;
    }

    /*
     * We can't finish transaction commit or abort until all of the workers
     * have exited.  This means, in particular, that we can't respond to
     * interrupts at this stage.
     */
    HOLD_INTERRUPTS();
    WaitForParallelWorkersToExit(pcxt);
    RESUME_INTERRUPTS();

    /* Free the worker array itself. */
    if (pcxt->worker != NULL)
    {
        pfree(pcxt->worker);
        pcxt->worker = NULL;
    }

    /* Free memory. */
    pfree(pcxt->library_name);
    pfree(pcxt->function_name);
    pfree(pcxt);
}
/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
    return !dlist_is_empty(&pcxt_list);
}
/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 *
 * Note: this is called within a signal handler!  All we can do is set
 * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
 * HandleParallelMessages().
 */
void
HandleParallelMessageInterrupt(void)
{
    InterruptPending = true;
    ParallelMessagePending = true;
    SetLatch(MyLatch);
}
/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
    dlist_iter  iter;
    MemoryContext oldcontext;

    static MemoryContext hpm_context = NULL;

    /*
     * This is invoked from ProcessInterrupts(), and since some of the
     * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
     * for recursive calls if more signals are received while this runs.  It's
     * unclear that recursive entry would be safe, and it doesn't seem useful
     * even if it is safe, so let's block interrupts until done.
     */
    HOLD_INTERRUPTS();

    /*
     * Moreover, CurrentMemoryContext might be pointing almost anywhere.  We
     * don't want to risk leaking data into long-lived contexts, so let's do
     * our work here in a private context that we can reset on each use.
     */
    if (hpm_context == NULL)    /* first time through? */
        hpm_context = AllocSetContextCreate(TopMemoryContext,
                                            "HandleParallelMessages",
                                            ALLOCSET_DEFAULT_SIZES);
    else
        MemoryContextReset(hpm_context);

    oldcontext = MemoryContextSwitchTo(hpm_context);

    /* OK to process messages.  Reset the flag saying there are more to do. */
    ParallelMessagePending = false;

    dlist_foreach(iter, &pcxt_list)
    {
        ParallelContext *pcxt;
        int         i;

        pcxt = dlist_container(ParallelContext, node, iter.cur);
        if (pcxt->worker == NULL)
            continue;

        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            /*
             * Read as many messages as we can from each worker, but stop when
             * either (1) the worker's error queue goes away, which can happen
             * if we receive a Terminate message from the worker; or (2) no
             * more messages can be read from the worker without blocking.
             */
            while (pcxt->worker[i].error_mqh != NULL)
            {
                shm_mq_result res;
                Size        nbytes;
                void       *data;

                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                     &data, true);
                if (res == SHM_MQ_WOULD_BLOCK)
                    break;
                else if (res == SHM_MQ_SUCCESS)
                {
                    StringInfoData msg;

                    initStringInfo(&msg);
                    appendBinaryStringInfo(&msg, data, nbytes);
                    HandleParallelMessage(pcxt, i, &msg);
                    pfree(msg.data);
                }
                else
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                             errmsg("lost connection to parallel worker")));
            }
        }
    }

    MemoryContextSwitchTo(oldcontext);

    /* Might as well clear the context on our way out */
    MemoryContextReset(hpm_context);

    RESUME_INTERRUPTS();
}
/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
    char        msgtype;

    if (pcxt->known_attached_workers != NULL &&
        !pcxt->known_attached_workers[i])
    {
        pcxt->known_attached_workers[i] = true;
        pcxt->nknown_attached_workers++;
    }

    msgtype = pq_getmsgbyte(msg);

    switch (msgtype)
    {
        case PqMsg_ErrorResponse:
        case PqMsg_NoticeResponse:
            {
                ErrorData   edata;
                ErrorContextCallback *save_error_context_stack;

                /* Parse ErrorResponse or NoticeResponse. */
                pq_parse_errornotice(msg, &edata);

                /* Death of a worker isn't enough justification for suicide. */
                edata.elevel = Min(edata.elevel, ERROR);

                /*
                 * If desired, add a context line to show that this is a
                 * message propagated from a parallel worker.  Otherwise, it
                 * can sometimes be confusing to understand what actually
                 * happened.  (We don't do this in DEBUG_PARALLEL_REGRESS mode
                 * because it causes test-result instability depending on
                 * whether a parallel worker is actually used or not.)
                 */
                if (debug_parallel_query != DEBUG_PARALLEL_REGRESS)
                {
                    if (edata.context)
                        edata.context = psprintf("%s\n%s", edata.context,
                                                 _("parallel worker"));
                    else
                        edata.context = pstrdup(_("parallel worker"));
                }

                /*
                 * Context beyond that should use the error context callbacks
                 * that were in effect when the ParallelContext was created,
                 * not the current ones.
                 */
                save_error_context_stack = error_context_stack;
                error_context_stack = pcxt->error_context_stack;

                /* Rethrow error or print notice. */
                ThrowErrorData(&edata);

                /* Not an error, so restore previous context stack. */
                error_context_stack = save_error_context_stack;

                break;
            }

        case PqMsg_NotificationResponse:
            {
                /* Propagate NotifyResponse. */
                int32       pid;
                const char *channel;
                const char *payload;

                pid = pq_getmsgint(msg, 4);
                channel = pq_getmsgrawstring(msg);
                payload = pq_getmsgrawstring(msg);

                NotifyMyFrontEnd(channel, payload, pid);

                break;
            }

        case 'P':               /* Parallel progress reporting */
            {
                /*
                 * Only incremental progress reporting is currently supported.
                 * However, it's possible to add more fields to the message to
                 * allow for handling of other backend progress APIs.
                 */
                int         index = pq_getmsgint(msg, 4);
                int64       incr = pq_getmsgint64(msg);

                pgstat_progress_incr_param(index, incr);

                break;
            }

        case PqMsg_Terminate:
            {
                shm_mq_detach(pcxt->worker[i].error_mqh);
                pcxt->worker[i].error_mqh = NULL;
                break;
            }

        default:
            {
                elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
                     msgtype, msg->len);
            }
    }
}
/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Here we remove only parallel contexts initiated within the current
 * subtransaction.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (pcxt->subid != mySubId)
            break;
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}
/*
 * End-of-transaction cleanup for parallel contexts.
 *
 * We nuke all remaining parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}
/*
 * Main entrypoint for parallel workers.
 */
void
ParallelWorkerMain(Datum main_arg)
{
    dsm_segment *seg;
    shm_toc    *toc;
    FixedParallelState *fps;
    char       *error_queue_space;
    shm_mq     *mq;
    shm_mq_handle *mqh;
    char       *libraryspace;
    char       *entrypointstate;
    char       *library_name;
    char       *function_name;
    parallel_worker_main_type entrypt;
    char       *gucspace;
    char       *combocidspace;
    char       *tsnapspace;
    char       *asnapspace;
    char       *tstatespace;
    char       *pendingsyncsspace;
    char       *reindexspace;
    char       *relmapperspace;
    char       *uncommittedenumsspace;
    char       *clientconninfospace;
    char       *session_dsm_handle_space;
    Snapshot    tsnapshot;
    Snapshot    asnapshot;

    /* Set flag to indicate that we're initializing a parallel worker. */
    InitializingParallelWorker = true;

    /* Establish signal handlers. */
    pqsignal(SIGTERM, die);
    BackgroundWorkerUnblockSignals();

    /* Determine and set our parallel worker number. */
    Assert(ParallelWorkerNumber == -1);
    memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));

    /* Set up a memory context to work in, just for cleanliness. */
    CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                 "Parallel worker",
                                                 ALLOCSET_DEFAULT_SIZES);

    /*
     * Attach to the dynamic shared memory segment for the parallel query, and
     * find its table of contents.
     *
     * Note: at this point, we have not created any ResourceOwner in this
     * process.  This will result in our DSM mapping surviving until process
     * exit, which is fine.  If there were a ResourceOwner, it would acquire
     * ownership of the mapping, but we have no need for that.
     */
    seg = dsm_attach(DatumGetUInt32(main_arg));
    if (seg == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("could not map dynamic shared memory segment")));
    toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
    if (toc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("invalid magic number in dynamic shared memory segment")));

    /* Look up fixed parallel state. */
    fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
    MyFixedParallelState = fps;

    /* Arrange to signal the leader if we exit. */
    ParallelLeaderPid = fps->parallel_leader_pid;
    ParallelLeaderProcNumber = fps->parallel_leader_proc_number;
    before_shmem_exit(ParallelWorkerShutdown, PointerGetDatum(seg));

    /*
     * Now we can find and attach to the error queue provided for us.  That's
     * good, because until we do that, any errors that happen here will not be
     * reported back to the process that requested that this worker be
     * launched.
     */
    error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
    mq = (shm_mq *) (error_queue_space +
                     ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
    shm_mq_set_sender(mq, MyProc);
    mqh = shm_mq_attach(mq, seg, NULL);
    pq_redirect_to_shm_mq(seg, mqh);
    pq_set_parallel_leader(fps->parallel_leader_pid,
                           fps->parallel_leader_proc_number);

    /*
     * Hooray! Primary initialization is complete.  Now, we need to set up our
     * backend-local state to match the original backend.
     */

    /*
     * Join locking group.  We must do this before anything that could try to
     * acquire a heavyweight lock, because any heavyweight locks acquired to
     * this point could block either directly against the parallel group
     * leader or against some process which in turn waits for a lock that
     * conflicts with the parallel group leader, causing an undetected
     * deadlock.  (If we can't join the lock group, the leader has gone away,
     * so just exit quietly.)
     */
    if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
                               fps->parallel_leader_pid))
        return;

    /*
     * Restore transaction and statement start-time timestamps.  This must
     * happen before anything that would start a transaction, else asserts in
     * xact.c will fire.
     */
    SetParallelStartTimestamps(fps->xact_ts, fps->stmt_ts);

    /*
     * Identify the entry point to be called.  In theory this could result in
     * loading an additional library, though most likely the entry point is in
     * the core backend or in a library we just loaded.
     */
    entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
    library_name = entrypointstate;
    function_name = entrypointstate + strlen(library_name) + 1;

    entrypt = LookupParallelWorkerFunction(library_name, function_name);

    /*
     * Restore current session authorization and role id.  No verification
     * happens here, we just blindly adopt the leader's state.  Note that this
     * has to happen before InitPostgres, since InitializeSessionUserId will
     * not set these variables.
     */
    SetAuthenticatedUserId(fps->authenticated_user_id);
    SetSessionAuthorization(fps->session_user_id,
                            fps->session_user_is_superuser);
    SetCurrentRoleId(fps->outer_user_id, fps->role_is_superuser);

    /* Restore database connection. */
    BackgroundWorkerInitializeConnectionByOid(fps->database_id,
                                              fps->authenticated_user_id,
                                              0);

    /*
     * Set the client encoding to the database encoding, since that is what
     * the leader will expect.
     */
    SetClientEncoding(GetDatabaseEncoding());

    /*
     * Load libraries that were loaded by original backend.  We want to do
     * this before restoring GUCs, because the libraries might define custom
     * variables.
     */
    libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
    StartTransactionCommand();
    RestoreLibraryState(libraryspace);

    /* Restore GUC values from launching backend. */
    gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
    RestoreGUCState(gucspace);
    CommitTransactionCommand();

    /* Crank up a transaction state appropriate to a parallel worker. */
    tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
    StartParallelWorkerTransaction(tstatespace);

    /* Restore combo CID state. */
    combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
    RestoreComboCIDState(combocidspace);

    /* Attach to the per-session DSM segment and contained objects. */
    session_dsm_handle_space =
        shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false);
    AttachSession(*(dsm_handle *) session_dsm_handle_space);

    /*
     * If the transaction isolation level is REPEATABLE READ or SERIALIZABLE,
     * the leader has serialized the transaction snapshot and we must restore
     * it.  At lower isolation levels, there is no transaction-lifetime
     * snapshot, but we need TransactionXmin to get set to a value which is
     * less than or equal to the xmin of every snapshot that will be used by
     * this worker.  The easiest way to accomplish that is to install the
     * active snapshot as the transaction snapshot.  Code running in this
     * parallel worker might take new snapshots via GetTransactionSnapshot()
     * or GetLatestSnapshot(), but it shouldn't have any way of acquiring a
     * snapshot older than the active snapshot.
     */
    asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
    tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, true);
    asnapshot = RestoreSnapshot(asnapspace);
    tsnapshot = tsnapspace ? RestoreSnapshot(tsnapspace) : asnapshot;
    RestoreTransactionSnapshot(tsnapshot,
                               fps->parallel_leader_pgproc);
    PushActiveSnapshot(asnapshot);

    /*
     * We've changed which tuples we can see, and must therefore invalidate
     * system caches.
     */
    InvalidateSystemCaches();

    /*
     * Restore current user ID and security context.  No verification happens
     * here, we just blindly adopt the leader's state.  We can't do this till
     * after restoring GUCs, else we'll get complaints about restoring
     * session_authorization and role.  (In effect, we're assuming that all
     * the restored values are okay to set, even if we are now inside a
     * restricted context.)
     */
    SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

    /* Restore temp-namespace state to ensure search path matches leader's. */
    SetTempNamespaceState(fps->temp_namespace_id,
                          fps->temp_toast_namespace_id);

    /* Restore pending syncs. */
    pendingsyncsspace = shm_toc_lookup(toc, PARALLEL_KEY_PENDING_SYNCS,
                                       false);
    RestorePendingSyncs(pendingsyncsspace);

    /* Restore reindex state. */
    reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false);
    RestoreReindexState(reindexspace);

    /* Restore relmapper state. */
    relmapperspace = shm_toc_lookup(toc, PARALLEL_KEY_RELMAPPER_STATE, false);
    RestoreRelationMap(relmapperspace);

    /* Restore uncommitted enums. */
    uncommittedenumsspace = shm_toc_lookup(toc, PARALLEL_KEY_UNCOMMITTEDENUMS,
                                           false);
    RestoreUncommittedEnums(uncommittedenumsspace);

    /* Restore the ClientConnectionInfo. */
    clientconninfospace = shm_toc_lookup(toc, PARALLEL_KEY_CLIENTCONNINFO,
                                         false);
    RestoreClientConnectionInfo(clientconninfospace);

    /*
     * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
     * ensure that auth_method is actually valid, aka authn_id is not NULL.
     */
    if (MyClientConnectionInfo.authn_id)
        InitializeSystemUser(MyClientConnectionInfo.authn_id,
                             hba_authname(MyClientConnectionInfo.auth_method));

    /* Attach to the leader's serializable transaction, if SERIALIZABLE. */
    AttachSerializableXact(fps->serializable_xact_handle);

    /*
     * We've initialized all of our state now; nothing should change
     * hereafter.
     */
    InitializingParallelWorker = false;
    EnterParallelMode();

    /*
     * Time to do the real work: invoke the caller-supplied code.
     */
    entrypt(seg, toc);

    /* Must exit parallel mode to pop active snapshot. */
    ExitParallelMode();

    /* Must pop active snapshot so snapmgr.c doesn't complain. */
    PopActiveSnapshot();

    /* Shut down the parallel-worker transaction. */
    EndParallelWorkerTransaction();

    /* Detach from the per-session DSM segment. */
    DetachSession();

    /* Report success. */
    pq_putmessage(PqMsg_Terminate, NULL, 0);
}
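
/*
 * Editor's illustration (not part of the original file): the caller-supplied
 * entry point invoked above receives the worker's DSM segment and its table
 * of contents.  A hypothetical entry point might look like the following,
 * reusing the invented MYCODE_KEY_STATE/MyCodeSharedState names from the
 * sketch near the key definitions above.
 *
 *		void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			MyCodeSharedState *state;
 *
 *			state = shm_toc_lookup(toc, MYCODE_KEY_STATE, false);
 *			... do this worker's share of the work using state ...
 *		}
 */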
/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
    FixedParallelState *fps = MyFixedParallelState;

    Assert(fps != NULL);
    SpinLockAcquire(&fps->mutex);
    if (fps->last_xlog_end < last_xlog_end)
        fps->last_xlog_end = last_xlog_end;
    SpinLockRelease(&fps->mutex);
}
/*
 * Make sure the leader tries to read from our error queue one more time.
 * This guards against the case where we exit uncleanly without sending an
 * ErrorResponse to the leader, for example because some code calls proc_exit
 * directly.
 *
 * Also explicitly detach from dsm segment so that subsystems using
 * on_dsm_detach() have a chance to send stats before the stats subsystem is
 * shut down as part of a before_shmem_exit() hook.
 *
 * One might think this could instead be solved by carefully ordering the
 * attaching to dsm segments, so that the pgstats segments get detached from
 * later than the parallel query one.  That turns out to not work because the
 * stats hash might need to grow which can cause new segments to be allocated,
 * which then will be detached from earlier.
 */
static void
ParallelWorkerShutdown(int code, Datum arg)
{
    SendProcSignal(ParallelLeaderPid,
                   PROCSIG_PARALLEL_MESSAGE,
                   ParallelLeaderProcNumber);

    dsm_detach((dsm_segment *) DatumGetPointer(arg));
}
/*
 * Look up (and possibly load) a parallel worker entry point function.
 *
 * For functions contained in the core code, we use library name "postgres"
 * and consult the InternalParallelWorkers array.  External functions are
 * looked up, and loaded if necessary, using load_external_function().
 *
 * The point of this is to pass function names as strings across process
 * boundaries.  We can't pass actual function addresses because of the
 * possibility that the function has been loaded at a different address
 * in a different process.  This is obviously a hazard for functions in
 * loadable libraries, but it can happen even for functions in the core code
 * on platforms using EXEC_BACKEND (e.g., Windows).
 *
 * At some point it might be worthwhile to get rid of InternalParallelWorkers[]
 * in favor of applying load_external_function() for core functions too;
 * but that raises portability issues that are not worth addressing now.
 */
static parallel_worker_main_type
LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
{
    /*
     * If the function is to be loaded from postgres itself, search the
     * InternalParallelWorkers array.
     */
    if (strcmp(libraryname, "postgres") == 0)
    {
        int         i;

        for (i = 0; i < lengthof(InternalParallelWorkers); i++)
        {
            if (strcmp(InternalParallelWorkers[i].fn_name, funcname) == 0)
                return InternalParallelWorkers[i].fn_addr;
        }

        /* We can only reach this by programming error. */
        elog(ERROR, "internal function \"%s\" not found", funcname);
    }

    /* Otherwise load from external library. */
    return (parallel_worker_main_type)
        load_external_function(libraryname, funcname, true, NULL);
}