nbtree: fix read page recheck typo.
[pgsql.git] / src / backend / storage / ipc / procsignal.c
blob87027f27eb7a9fc4d016b3c1e27fc477a25bcfc7
/*-------------------------------------------------------------------------
 *
 * procsignal.c
 *	  Routines for interprocess signaling
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/storage/ipc/procsignal.c
 *
 *-------------------------------------------------------------------------
 */
15 #include "postgres.h"
17 #include <signal.h>
18 #include <unistd.h>
20 #include "access/parallel.h"
21 #include "commands/async.h"
22 #include "miscadmin.h"
23 #include "pgstat.h"
24 #include "port/pg_bitutils.h"
25 #include "replication/logicalworker.h"
26 #include "replication/walsender.h"
27 #include "storage/condition_variable.h"
28 #include "storage/ipc.h"
29 #include "storage/latch.h"
30 #include "storage/shmem.h"
31 #include "storage/sinval.h"
32 #include "storage/smgr.h"
33 #include "tcop/tcopprot.h"
34 #include "utils/memutils.h"
/*
 * The SIGUSR1 signal is multiplexed to support signaling multiple event
 * types. The specific reason is communicated via flags in shared memory.
 * We keep a boolean flag for each possible "reason", so that different
 * reasons can be signaled to a process concurrently.  (However, if the same
 * reason is signaled more than once nearly simultaneously, the process may
 * observe it only once.)
 *
 * Each process that wants to receive signals registers its process ID
 * in the ProcSignalSlots array.  The array is indexed by ProcNumber to make
 * slot allocation simple, and to avoid having to search the array when you
 * know the ProcNumber of the process you're signaling.  (We do support
 * signaling without ProcNumber, but it's a bit less efficient.)
 *
 * The fields in each slot are protected by a spinlock, pss_mutex. pss_pid can
 * also be read without holding the spinlock, as a quick preliminary check
 * when searching for a particular PID in the array.
 *
 * pss_signalFlags are intended to be set in cases where we don't need to
 * keep track of whether or not the target process has handled the signal,
 * but sometimes we need confirmation, as when making a global state change
 * that cannot be considered complete until all backends have taken notice
 * of it.  For such use cases, we set a bit in pss_barrierCheckMask and then
 * increment the current "barrier generation"; when the new barrier generation
 * (or greater) appears in the pss_barrierGeneration flag of every process,
 * we know that the message has been received everywhere.
 */
63 typedef struct
65 pg_atomic_uint32 pss_pid;
66 bool pss_cancel_key_valid;
67 int32 pss_cancel_key;
68 volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
69 slock_t pss_mutex; /* protects the above fields */
71 /* Barrier-related fields (not protected by pss_mutex) */
72 pg_atomic_uint64 pss_barrierGeneration;
73 pg_atomic_uint32 pss_barrierCheckMask;
74 ConditionVariable pss_barrierCV;
75 } ProcSignalSlot;
78 * Information that is global to the entire ProcSignal system can be stored
79 * here.
81 * psh_barrierGeneration is the highest barrier generation in existence.
83 struct ProcSignalHeader
85 pg_atomic_uint64 psh_barrierGeneration;
86 ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
/*
 * We reserve a slot for each possible ProcNumber, plus one for each
 * possible auxiliary process type.  (This scheme assumes there is not
 * more than one of any auxiliary process type at a time.)
 */
#define NumProcSignalSlots	(MaxBackends + NUM_AUXILIARY_PROCS)

/* Check whether the relevant type bit is set in the flags. */
#define BARRIER_SHOULD_CHECK(flags, type) \
	(((flags) & (((uint32) 1) << (uint32) (type))) != 0)

/* Clear the relevant type bit from the flags. */
#define BARRIER_CLEAR_BIT(flags, type) \
	((flags) &= ~(((uint32) 1) << (uint32) (type)))
104 NON_EXEC_STATIC ProcSignalHeader *ProcSignal = NULL;
105 static ProcSignalSlot *MyProcSignalSlot = NULL;
107 static bool CheckProcSignal(ProcSignalReason reason);
108 static void CleanupProcSignalState(int status, Datum arg);
109 static void ResetProcSignalBarrierBits(uint32 flags);
112 * ProcSignalShmemSize
113 * Compute space needed for ProcSignal's shared memory
115 Size
116 ProcSignalShmemSize(void)
118 Size size;
120 size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
121 size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
122 return size;
126 * ProcSignalShmemInit
127 * Allocate and initialize ProcSignal's shared memory
129 void
130 ProcSignalShmemInit(void)
132 Size size = ProcSignalShmemSize();
133 bool found;
135 ProcSignal = (ProcSignalHeader *)
136 ShmemInitStruct("ProcSignal", size, &found);
138 /* If we're first, initialize. */
139 if (!found)
141 int i;
143 pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
145 for (i = 0; i < NumProcSignalSlots; ++i)
147 ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
149 SpinLockInit(&slot->pss_mutex);
150 pg_atomic_init_u32(&slot->pss_pid, 0);
151 slot->pss_cancel_key_valid = false;
152 slot->pss_cancel_key = 0;
153 MemSet(slot->pss_signalFlags, 0, sizeof(slot->pss_signalFlags));
154 pg_atomic_init_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
155 pg_atomic_init_u32(&slot->pss_barrierCheckMask, 0);
156 ConditionVariableInit(&slot->pss_barrierCV);
162 * ProcSignalInit
163 * Register the current process in the ProcSignal array
165 void
166 ProcSignalInit(bool cancel_key_valid, int32 cancel_key)
168 ProcSignalSlot *slot;
169 uint64 barrier_generation;
171 if (MyProcNumber < 0)
172 elog(ERROR, "MyProcNumber not set");
173 if (MyProcNumber >= NumProcSignalSlots)
174 elog(ERROR, "unexpected MyProcNumber %d in ProcSignalInit (max %d)", MyProcNumber, NumProcSignalSlots);
175 slot = &ProcSignal->psh_slot[MyProcNumber];
177 /* sanity check */
178 SpinLockAcquire(&slot->pss_mutex);
179 if (pg_atomic_read_u32(&slot->pss_pid) != 0)
181 SpinLockRelease(&slot->pss_mutex);
182 elog(LOG, "process %d taking over ProcSignal slot %d, but it's not empty",
183 MyProcPid, MyProcNumber);
186 /* Clear out any leftover signal reasons */
187 MemSet(slot->pss_signalFlags, 0, NUM_PROCSIGNALS * sizeof(sig_atomic_t));
190 * Initialize barrier state. Since we're a brand-new process, there
191 * shouldn't be any leftover backend-private state that needs to be
192 * updated. Therefore, we can broadcast the latest barrier generation and
193 * disregard any previously-set check bits.
195 * NB: This only works if this initialization happens early enough in the
196 * startup sequence that we haven't yet cached any state that might need
197 * to be invalidated. That's also why we have a memory barrier here, to be
198 * sure that any later reads of memory happen strictly after this.
200 pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
201 barrier_generation =
202 pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
203 pg_atomic_write_u64(&slot->pss_barrierGeneration, barrier_generation);
205 slot->pss_cancel_key_valid = cancel_key_valid;
206 slot->pss_cancel_key = cancel_key;
207 pg_atomic_write_u32(&slot->pss_pid, MyProcPid);
209 SpinLockRelease(&slot->pss_mutex);
211 /* Remember slot location for CheckProcSignal */
212 MyProcSignalSlot = slot;
214 /* Set up to release the slot on process exit */
215 on_shmem_exit(CleanupProcSignalState, (Datum) 0);
219 * CleanupProcSignalState
220 * Remove current process from ProcSignal mechanism
222 * This function is called via on_shmem_exit() during backend shutdown.
224 static void
225 CleanupProcSignalState(int status, Datum arg)
227 pid_t old_pid;
228 ProcSignalSlot *slot = MyProcSignalSlot;
231 * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
232 * won't try to access it after it's no longer ours (and perhaps even
233 * after we've unmapped the shared memory segment).
235 Assert(MyProcSignalSlot != NULL);
236 MyProcSignalSlot = NULL;
238 /* sanity check */
239 SpinLockAcquire(&slot->pss_mutex);
240 old_pid = pg_atomic_read_u32(&slot->pss_pid);
241 if (old_pid != MyProcPid)
244 * don't ERROR here. We're exiting anyway, and don't want to get into
245 * infinite loop trying to exit
247 SpinLockRelease(&slot->pss_mutex);
248 elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
249 MyProcPid, (int) (slot - ProcSignal->psh_slot), (int) old_pid);
250 return; /* XXX better to zero the slot anyway? */
253 /* Mark the slot as unused */
254 pg_atomic_write_u32(&slot->pss_pid, 0);
255 slot->pss_cancel_key_valid = false;
256 slot->pss_cancel_key = 0;
259 * Make this slot look like it's absorbed all possible barriers, so that
260 * no barrier waits block on it.
262 pg_atomic_write_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
264 SpinLockRelease(&slot->pss_mutex);
266 ConditionVariableBroadcast(&slot->pss_barrierCV);
270 * SendProcSignal
271 * Send a signal to a Postgres process
273 * Providing procNumber is optional, but it will speed up the operation.
275 * On success (a signal was sent), zero is returned.
276 * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
278 * Not to be confused with ProcSendSignal
281 SendProcSignal(pid_t pid, ProcSignalReason reason, ProcNumber procNumber)
283 volatile ProcSignalSlot *slot;
285 if (procNumber != INVALID_PROC_NUMBER)
287 Assert(procNumber < NumProcSignalSlots);
288 slot = &ProcSignal->psh_slot[procNumber];
290 SpinLockAcquire(&slot->pss_mutex);
291 if (pg_atomic_read_u32(&slot->pss_pid) == pid)
293 /* Atomically set the proper flag */
294 slot->pss_signalFlags[reason] = true;
295 SpinLockRelease(&slot->pss_mutex);
296 /* Send signal */
297 return kill(pid, SIGUSR1);
299 SpinLockRelease(&slot->pss_mutex);
301 else
304 * procNumber not provided, so search the array using pid. We search
305 * the array back to front so as to reduce search overhead. Passing
306 * INVALID_PROC_NUMBER means that the target is most likely an
307 * auxiliary process, which will have a slot near the end of the
308 * array.
310 int i;
312 for (i = NumProcSignalSlots - 1; i >= 0; i--)
314 slot = &ProcSignal->psh_slot[i];
316 if (pg_atomic_read_u32(&slot->pss_pid) == pid)
318 SpinLockAcquire(&slot->pss_mutex);
319 if (pg_atomic_read_u32(&slot->pss_pid) == pid)
321 /* Atomically set the proper flag */
322 slot->pss_signalFlags[reason] = true;
323 SpinLockRelease(&slot->pss_mutex);
324 /* Send signal */
325 return kill(pid, SIGUSR1);
327 SpinLockRelease(&slot->pss_mutex);
332 errno = ESRCH;
333 return -1;
337 * EmitProcSignalBarrier
338 * Send a signal to every Postgres process
340 * The return value of this function is the barrier "generation" created
341 * by this operation. This value can be passed to WaitForProcSignalBarrier
342 * to wait until it is known that every participant in the ProcSignal
343 * mechanism has absorbed the signal (or started afterwards).
345 * Note that it would be a bad idea to use this for anything that happens
346 * frequently, as interrupting every backend could cause a noticeable
347 * performance hit.
349 * Callers are entitled to assume that this function will not throw ERROR
350 * or FATAL.
352 uint64
353 EmitProcSignalBarrier(ProcSignalBarrierType type)
355 uint32 flagbit = 1 << (uint32) type;
356 uint64 generation;
359 * Set all the flags.
361 * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
362 * totally ordered with respect to anything the caller did before, and
363 * anything that we do afterwards. (This is also true of the later call to
364 * pg_atomic_add_fetch_u64.)
366 for (int i = 0; i < NumProcSignalSlots; i++)
368 volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
370 pg_atomic_fetch_or_u32(&slot->pss_barrierCheckMask, flagbit);
374 * Increment the generation counter.
376 generation =
377 pg_atomic_add_fetch_u64(&ProcSignal->psh_barrierGeneration, 1);
380 * Signal all the processes, so that they update their advertised barrier
381 * generation.
383 * Concurrency is not a problem here. Backends that have exited don't
384 * matter, and new backends that have joined since we entered this
385 * function must already have current state, since the caller is
386 * responsible for making sure that the relevant state is entirely visible
387 * before calling this function in the first place. We still have to wake
388 * them up - because we can't distinguish between such backends and older
389 * backends that need to update state - but they won't actually need to
390 * change any state.
392 for (int i = NumProcSignalSlots - 1; i >= 0; i--)
394 volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
395 pid_t pid = pg_atomic_read_u32(&slot->pss_pid);
397 if (pid != 0)
399 SpinLockAcquire(&slot->pss_mutex);
400 pid = pg_atomic_read_u32(&slot->pss_pid);
401 if (pid != 0)
403 /* see SendProcSignal for details */
404 slot->pss_signalFlags[PROCSIG_BARRIER] = true;
405 SpinLockRelease(&slot->pss_mutex);
406 kill(pid, SIGUSR1);
408 else
409 SpinLockRelease(&slot->pss_mutex);
413 return generation;
417 * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
418 * requested by a specific call to EmitProcSignalBarrier() have taken effect.
420 void
421 WaitForProcSignalBarrier(uint64 generation)
423 Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
425 elog(DEBUG1,
426 "waiting for all backends to process ProcSignalBarrier generation "
427 UINT64_FORMAT,
428 generation);
430 for (int i = NumProcSignalSlots - 1; i >= 0; i--)
432 ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
433 uint64 oldval;
436 * It's important that we check only pss_barrierGeneration here and
437 * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
438 * before the barrier is actually absorbed, but pss_barrierGeneration
439 * is updated only afterward.
441 oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
442 while (oldval < generation)
444 if (ConditionVariableTimedSleep(&slot->pss_barrierCV,
445 5000,
446 WAIT_EVENT_PROC_SIGNAL_BARRIER))
447 ereport(LOG,
448 (errmsg("still waiting for backend with PID %d to accept ProcSignalBarrier",
449 (int) pg_atomic_read_u32(&slot->pss_pid))));
450 oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
452 ConditionVariableCancelSleep();
455 elog(DEBUG1,
456 "finished waiting for all backends to process ProcSignalBarrier generation "
457 UINT64_FORMAT,
458 generation);
461 * The caller is probably calling this function because it wants to read
462 * the shared state or perform further writes to shared state once all
463 * backends are known to have absorbed the barrier. However, the read of
464 * pss_barrierGeneration was performed unlocked; insert a memory barrier
465 * to separate it from whatever follows.
467 pg_memory_barrier();
471 * Handle receipt of an interrupt indicating a global barrier event.
473 * All the actual work is deferred to ProcessProcSignalBarrier(), because we
474 * cannot safely access the barrier generation inside the signal handler as
475 * 64bit atomics might use spinlock based emulation, even for reads. As this
476 * routine only gets called when PROCSIG_BARRIER is sent that won't cause a
477 * lot of unnecessary work.
479 static void
480 HandleProcSignalBarrierInterrupt(void)
482 InterruptPending = true;
483 ProcSignalBarrierPending = true;
484 /* latch will be set by procsignal_sigusr1_handler */
488 * Perform global barrier related interrupt checking.
490 * Any backend that participates in ProcSignal signaling must arrange to
491 * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
492 * which is enough for normal backends, but not necessarily for all types of
493 * background processes.
495 void
496 ProcessProcSignalBarrier(void)
498 uint64 local_gen;
499 uint64 shared_gen;
500 volatile uint32 flags;
502 Assert(MyProcSignalSlot);
504 /* Exit quickly if there's no work to do. */
505 if (!ProcSignalBarrierPending)
506 return;
507 ProcSignalBarrierPending = false;
510 * It's not unlikely to process multiple barriers at once, before the
511 * signals for all the barriers have arrived. To avoid unnecessary work in
512 * response to subsequent signals, exit early if we already have processed
513 * all of them.
515 local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
516 shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
518 Assert(local_gen <= shared_gen);
520 if (local_gen == shared_gen)
521 return;
524 * Get and clear the flags that are set for this backend. Note that
525 * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
526 * read of the barrier generation above happens before we atomically
527 * extract the flags, and that any subsequent state changes happen
528 * afterward.
530 * NB: In order to avoid race conditions, we must zero
531 * pss_barrierCheckMask first and only afterwards try to do barrier
532 * processing. If we did it in the other order, someone could send us
533 * another barrier of some type right after we called the
534 * barrier-processing function but before we cleared the bit. We would
535 * have no way of knowing that the bit needs to stay set in that case, so
536 * the need to call the barrier-processing function again would just get
537 * forgotten. So instead, we tentatively clear all the bits and then put
538 * back any for which we don't manage to successfully absorb the barrier.
540 flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
543 * If there are no flags set, then we can skip doing any real work.
544 * Otherwise, establish a PG_TRY block, so that we don't lose track of
545 * which types of barrier processing are needed if an ERROR occurs.
547 if (flags != 0)
549 bool success = true;
551 PG_TRY();
554 * Process each type of barrier. The barrier-processing functions
555 * should normally return true, but may return false if the
556 * barrier can't be absorbed at the current time. This should be
557 * rare, because it's pretty expensive. Every single
558 * CHECK_FOR_INTERRUPTS() will return here until we manage to
559 * absorb the barrier, and that cost will add up in a hurry.
561 * NB: It ought to be OK to call the barrier-processing functions
562 * unconditionally, but it's more efficient to call only the ones
563 * that might need us to do something based on the flags.
565 while (flags != 0)
567 ProcSignalBarrierType type;
568 bool processed = true;
570 type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
571 switch (type)
573 case PROCSIGNAL_BARRIER_SMGRRELEASE:
574 processed = ProcessBarrierSmgrRelease();
575 break;
579 * To avoid an infinite loop, we must always unset the bit in
580 * flags.
582 BARRIER_CLEAR_BIT(flags, type);
585 * If we failed to process the barrier, reset the shared bit
586 * so we try again later, and set a flag so that we don't bump
587 * our generation.
589 if (!processed)
591 ResetProcSignalBarrierBits(((uint32) 1) << type);
592 success = false;
596 PG_CATCH();
599 * If an ERROR occurred, we'll need to try again later to handle
600 * that barrier type and any others that haven't been handled yet
601 * or weren't successfully absorbed.
603 ResetProcSignalBarrierBits(flags);
604 PG_RE_THROW();
606 PG_END_TRY();
609 * If some barrier types were not successfully absorbed, we will have
610 * to try again later.
612 if (!success)
613 return;
617 * State changes related to all types of barriers that might have been
618 * emitted have now been handled, so we can update our notion of the
619 * generation to the one we observed before beginning the updates. If
620 * things have changed further, it'll get fixed up when this function is
621 * next called.
623 pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
624 ConditionVariableBroadcast(&MyProcSignalSlot->pss_barrierCV);
628 * If it turns out that we couldn't absorb one or more barrier types, either
629 * because the barrier-processing functions returned false or due to an error,
630 * arrange for processing to be retried later.
632 static void
633 ResetProcSignalBarrierBits(uint32 flags)
635 pg_atomic_fetch_or_u32(&MyProcSignalSlot->pss_barrierCheckMask, flags);
636 ProcSignalBarrierPending = true;
637 InterruptPending = true;
641 * CheckProcSignal - check to see if a particular reason has been
642 * signaled, and clear the signal flag. Should be called after receiving
643 * SIGUSR1.
645 static bool
646 CheckProcSignal(ProcSignalReason reason)
648 volatile ProcSignalSlot *slot = MyProcSignalSlot;
650 if (slot != NULL)
653 * Careful here --- don't clear flag if we haven't seen it set.
654 * pss_signalFlags is of type "volatile sig_atomic_t" to allow us to
655 * read it here safely, without holding the spinlock.
657 if (slot->pss_signalFlags[reason])
659 slot->pss_signalFlags[reason] = false;
660 return true;
664 return false;
668 * procsignal_sigusr1_handler - handle SIGUSR1 signal.
670 void
671 procsignal_sigusr1_handler(SIGNAL_ARGS)
673 if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT))
674 HandleCatchupInterrupt();
676 if (CheckProcSignal(PROCSIG_NOTIFY_INTERRUPT))
677 HandleNotifyInterrupt();
679 if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE))
680 HandleParallelMessageInterrupt();
682 if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
683 HandleWalSndInitStopping();
685 if (CheckProcSignal(PROCSIG_BARRIER))
686 HandleProcSignalBarrierInterrupt();
688 if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
689 HandleLogMemoryContextInterrupt();
691 if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
692 HandleParallelApplyMessageInterrupt();
694 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
695 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
697 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_TABLESPACE))
698 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
700 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOCK))
701 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOCK);
703 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT))
704 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
706 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT))
707 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT);
709 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK))
710 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
712 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN))
713 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
715 SetLatch(MyLatch);
719 * Send a query cancellation signal to backend.
721 * Note: This is called from a backend process before authentication. We
722 * cannot take LWLocks yet, but that's OK; we rely on atomic reads of the
723 * fields in the ProcSignal slots.
725 void
726 SendCancelRequest(int backendPID, int32 cancelAuthCode)
728 Assert(backendPID != 0);
731 * See if we have a matching backend. Reading the pss_pid and
732 * pss_cancel_key fields is racy, a backend might die and remove itself
733 * from the array at any time. The probability of the cancellation key
734 * matching wrong process is miniscule, however, so we can live with that.
735 * PIDs are reused too, so sending the signal based on PID is inherently
736 * racy anyway, although OS's avoid reusing PIDs too soon.
738 for (int i = 0; i < NumProcSignalSlots; i++)
740 ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
741 bool match;
743 if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
744 continue;
746 /* Acquire the spinlock and re-check */
747 SpinLockAcquire(&slot->pss_mutex);
748 if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
750 SpinLockRelease(&slot->pss_mutex);
751 continue;
753 else
755 match = slot->pss_cancel_key_valid && slot->pss_cancel_key == cancelAuthCode;
757 SpinLockRelease(&slot->pss_mutex);
759 if (match)
761 /* Found a match; signal that backend to cancel current op */
762 ereport(DEBUG2,
763 (errmsg_internal("processing cancel request: sending SIGINT to process %d",
764 backendPID)));
767 * If we have setsid(), signal the backend's whole process
768 * group
770 #ifdef HAVE_SETSID
771 kill(-backendPID, SIGINT);
772 #else
773 kill(backendPID, SIGINT);
774 #endif
776 else
778 /* Right PID, wrong key: no way, Jose */
779 ereport(LOG,
780 (errmsg("wrong key in cancel request for process %d",
781 backendPID)));
783 return;
787 /* No matching backend */
788 ereport(LOG,
789 (errmsg("PID %d in cancel request did not match any process",
790 backendPID)));