1 /*-------------------------------------------------------------------------
4 * Routines for interprocess signaling
7 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/storage/ipc/procsignal.c
13 *-------------------------------------------------------------------------
20 #include "access/parallel.h"
21 #include "commands/async.h"
22 #include "miscadmin.h"
24 #include "port/pg_bitutils.h"
25 #include "replication/logicalworker.h"
26 #include "replication/walsender.h"
27 #include "storage/condition_variable.h"
28 #include "storage/ipc.h"
29 #include "storage/latch.h"
30 #include "storage/shmem.h"
31 #include "storage/sinval.h"
32 #include "storage/smgr.h"
33 #include "tcop/tcopprot.h"
34 #include "utils/memutils.h"
37 * The SIGUSR1 signal is multiplexed to support signaling multiple event
38 * types. The specific reason is communicated via flags in shared memory.
39 * We keep a boolean flag for each possible "reason", so that different
40 * reasons can be signaled to a process concurrently. (However, if the same
41 * reason is signaled more than once nearly simultaneously, the process may
42 * observe it only once.)
44 * Each process that wants to receive signals registers its process ID
45 * in the ProcSignalSlots array. The array is indexed by ProcNumber to make
46 * slot allocation simple, and to avoid having to search the array when you
47 * know the ProcNumber of the process you're signaling. (We do support
48 * signaling without ProcNumber, but it's a bit less efficient.)
50 * The fields in each slot are protected by a spinlock, pss_mutex. pss_pid can
51 * also be read without holding the spinlock, as a quick preliminary check
52 * when searching for a particular PID in the array.
54 * pss_signalFlags are intended to be set in cases where we don't need to
55 * keep track of whether or not the target process has handled the signal,
56 * but sometimes we need confirmation, as when making a global state change
57 * that cannot be considered complete until all backends have taken notice
58 * of it. For such use cases, we set a bit in pss_barrierCheckMask and then
59 * increment the current "barrier generation"; when the new barrier generation
60 * (or greater) appears in the pss_barrierGeneration flag of every process,
61 * we know that the message has been received everywhere.
65 pg_atomic_uint32 pss_pid
;
66 bool pss_cancel_key_valid
;
68 volatile sig_atomic_t pss_signalFlags
[NUM_PROCSIGNALS
];
69 slock_t pss_mutex
; /* protects the above fields */
71 /* Barrier-related fields (not protected by pss_mutex) */
72 pg_atomic_uint64 pss_barrierGeneration
;
73 pg_atomic_uint32 pss_barrierCheckMask
;
74 ConditionVariable pss_barrierCV
;
78 * Information that is global to the entire ProcSignal system can be stored
81 * psh_barrierGeneration is the highest barrier generation in existence.
83 struct ProcSignalHeader
85 pg_atomic_uint64 psh_barrierGeneration
;
86 ProcSignalSlot psh_slot
[FLEXIBLE_ARRAY_MEMBER
];
/*
 * We reserve a slot for each possible ProcNumber, plus one for each
 * possible auxiliary process type. (This scheme assumes there is not
 * more than one of any auxiliary process type at a time.)
 */
#define NumProcSignalSlots	(MaxBackends + NUM_AUXILIARY_PROCS)

/* Check whether the relevant type bit is set in the flags. */
#define BARRIER_SHOULD_CHECK(flags, type) \
	(((flags) & (((uint32) 1) << (uint32) (type))) != 0)

/* Clear the relevant type bit from the flags. */
#define BARRIER_CLEAR_BIT(flags, type) \
	((flags) &= ~(((uint32) 1) << (uint32) (type)))

/* Pointer to the shared ProcSignal structure; set during shmem init. */
NON_EXEC_STATIC ProcSignalHeader *ProcSignal = NULL;
/* This process's own slot in the array, or NULL if not yet registered. */
static ProcSignalSlot *MyProcSignalSlot = NULL;

static bool CheckProcSignal(ProcSignalReason reason);
static void CleanupProcSignalState(int status, Datum arg);
static void ResetProcSignalBarrierBits(uint32 flags);
112 * ProcSignalShmemSize
113 * Compute space needed for ProcSignal's shared memory
116 ProcSignalShmemSize(void)
120 size
= mul_size(NumProcSignalSlots
, sizeof(ProcSignalSlot
));
121 size
= add_size(size
, offsetof(ProcSignalHeader
, psh_slot
));
126 * ProcSignalShmemInit
127 * Allocate and initialize ProcSignal's shared memory
130 ProcSignalShmemInit(void)
132 Size size
= ProcSignalShmemSize();
135 ProcSignal
= (ProcSignalHeader
*)
136 ShmemInitStruct("ProcSignal", size
, &found
);
138 /* If we're first, initialize. */
143 pg_atomic_init_u64(&ProcSignal
->psh_barrierGeneration
, 0);
145 for (i
= 0; i
< NumProcSignalSlots
; ++i
)
147 ProcSignalSlot
*slot
= &ProcSignal
->psh_slot
[i
];
149 SpinLockInit(&slot
->pss_mutex
);
150 pg_atomic_init_u32(&slot
->pss_pid
, 0);
151 slot
->pss_cancel_key_valid
= false;
152 slot
->pss_cancel_key
= 0;
153 MemSet(slot
->pss_signalFlags
, 0, sizeof(slot
->pss_signalFlags
));
154 pg_atomic_init_u64(&slot
->pss_barrierGeneration
, PG_UINT64_MAX
);
155 pg_atomic_init_u32(&slot
->pss_barrierCheckMask
, 0);
156 ConditionVariableInit(&slot
->pss_barrierCV
);
163 * Register the current process in the ProcSignal array
166 ProcSignalInit(bool cancel_key_valid
, int32 cancel_key
)
168 ProcSignalSlot
*slot
;
169 uint64 barrier_generation
;
171 if (MyProcNumber
< 0)
172 elog(ERROR
, "MyProcNumber not set");
173 if (MyProcNumber
>= NumProcSignalSlots
)
174 elog(ERROR
, "unexpected MyProcNumber %d in ProcSignalInit (max %d)", MyProcNumber
, NumProcSignalSlots
);
175 slot
= &ProcSignal
->psh_slot
[MyProcNumber
];
178 SpinLockAcquire(&slot
->pss_mutex
);
179 if (pg_atomic_read_u32(&slot
->pss_pid
) != 0)
181 SpinLockRelease(&slot
->pss_mutex
);
182 elog(LOG
, "process %d taking over ProcSignal slot %d, but it's not empty",
183 MyProcPid
, MyProcNumber
);
186 /* Clear out any leftover signal reasons */
187 MemSet(slot
->pss_signalFlags
, 0, NUM_PROCSIGNALS
* sizeof(sig_atomic_t));
190 * Initialize barrier state. Since we're a brand-new process, there
191 * shouldn't be any leftover backend-private state that needs to be
192 * updated. Therefore, we can broadcast the latest barrier generation and
193 * disregard any previously-set check bits.
195 * NB: This only works if this initialization happens early enough in the
196 * startup sequence that we haven't yet cached any state that might need
197 * to be invalidated. That's also why we have a memory barrier here, to be
198 * sure that any later reads of memory happen strictly after this.
200 pg_atomic_write_u32(&slot
->pss_barrierCheckMask
, 0);
202 pg_atomic_read_u64(&ProcSignal
->psh_barrierGeneration
);
203 pg_atomic_write_u64(&slot
->pss_barrierGeneration
, barrier_generation
);
205 slot
->pss_cancel_key_valid
= cancel_key_valid
;
206 slot
->pss_cancel_key
= cancel_key
;
207 pg_atomic_write_u32(&slot
->pss_pid
, MyProcPid
);
209 SpinLockRelease(&slot
->pss_mutex
);
211 /* Remember slot location for CheckProcSignal */
212 MyProcSignalSlot
= slot
;
214 /* Set up to release the slot on process exit */
215 on_shmem_exit(CleanupProcSignalState
, (Datum
) 0);
219 * CleanupProcSignalState
220 * Remove current process from ProcSignal mechanism
222 * This function is called via on_shmem_exit() during backend shutdown.
225 CleanupProcSignalState(int status
, Datum arg
)
228 ProcSignalSlot
*slot
= MyProcSignalSlot
;
231 * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
232 * won't try to access it after it's no longer ours (and perhaps even
233 * after we've unmapped the shared memory segment).
235 Assert(MyProcSignalSlot
!= NULL
);
236 MyProcSignalSlot
= NULL
;
239 SpinLockAcquire(&slot
->pss_mutex
);
240 old_pid
= pg_atomic_read_u32(&slot
->pss_pid
);
241 if (old_pid
!= MyProcPid
)
244 * don't ERROR here. We're exiting anyway, and don't want to get into
245 * infinite loop trying to exit
247 SpinLockRelease(&slot
->pss_mutex
);
248 elog(LOG
, "process %d releasing ProcSignal slot %d, but it contains %d",
249 MyProcPid
, (int) (slot
- ProcSignal
->psh_slot
), (int) old_pid
);
250 return; /* XXX better to zero the slot anyway? */
253 /* Mark the slot as unused */
254 pg_atomic_write_u32(&slot
->pss_pid
, 0);
255 slot
->pss_cancel_key_valid
= false;
256 slot
->pss_cancel_key
= 0;
259 * Make this slot look like it's absorbed all possible barriers, so that
260 * no barrier waits block on it.
262 pg_atomic_write_u64(&slot
->pss_barrierGeneration
, PG_UINT64_MAX
);
264 SpinLockRelease(&slot
->pss_mutex
);
266 ConditionVariableBroadcast(&slot
->pss_barrierCV
);
271 * Send a signal to a Postgres process
273 * Providing procNumber is optional, but it will speed up the operation.
275 * On success (a signal was sent), zero is returned.
276 * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
278 * Not to be confused with ProcSendSignal
281 SendProcSignal(pid_t pid
, ProcSignalReason reason
, ProcNumber procNumber
)
283 volatile ProcSignalSlot
*slot
;
285 if (procNumber
!= INVALID_PROC_NUMBER
)
287 Assert(procNumber
< NumProcSignalSlots
);
288 slot
= &ProcSignal
->psh_slot
[procNumber
];
290 SpinLockAcquire(&slot
->pss_mutex
);
291 if (pg_atomic_read_u32(&slot
->pss_pid
) == pid
)
293 /* Atomically set the proper flag */
294 slot
->pss_signalFlags
[reason
] = true;
295 SpinLockRelease(&slot
->pss_mutex
);
297 return kill(pid
, SIGUSR1
);
299 SpinLockRelease(&slot
->pss_mutex
);
304 * procNumber not provided, so search the array using pid. We search
305 * the array back to front so as to reduce search overhead. Passing
306 * INVALID_PROC_NUMBER means that the target is most likely an
307 * auxiliary process, which will have a slot near the end of the
312 for (i
= NumProcSignalSlots
- 1; i
>= 0; i
--)
314 slot
= &ProcSignal
->psh_slot
[i
];
316 if (pg_atomic_read_u32(&slot
->pss_pid
) == pid
)
318 SpinLockAcquire(&slot
->pss_mutex
);
319 if (pg_atomic_read_u32(&slot
->pss_pid
) == pid
)
321 /* Atomically set the proper flag */
322 slot
->pss_signalFlags
[reason
] = true;
323 SpinLockRelease(&slot
->pss_mutex
);
325 return kill(pid
, SIGUSR1
);
327 SpinLockRelease(&slot
->pss_mutex
);
337 * EmitProcSignalBarrier
338 * Send a signal to every Postgres process
340 * The return value of this function is the barrier "generation" created
341 * by this operation. This value can be passed to WaitForProcSignalBarrier
342 * to wait until it is known that every participant in the ProcSignal
343 * mechanism has absorbed the signal (or started afterwards).
345 * Note that it would be a bad idea to use this for anything that happens
346 * frequently, as interrupting every backend could cause a noticeable
349 * Callers are entitled to assume that this function will not throw ERROR
353 EmitProcSignalBarrier(ProcSignalBarrierType type
)
355 uint32 flagbit
= 1 << (uint32
) type
;
361 * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
362 * totally ordered with respect to anything the caller did before, and
363 * anything that we do afterwards. (This is also true of the later call to
364 * pg_atomic_add_fetch_u64.)
366 for (int i
= 0; i
< NumProcSignalSlots
; i
++)
368 volatile ProcSignalSlot
*slot
= &ProcSignal
->psh_slot
[i
];
370 pg_atomic_fetch_or_u32(&slot
->pss_barrierCheckMask
, flagbit
);
374 * Increment the generation counter.
377 pg_atomic_add_fetch_u64(&ProcSignal
->psh_barrierGeneration
, 1);
380 * Signal all the processes, so that they update their advertised barrier
383 * Concurrency is not a problem here. Backends that have exited don't
384 * matter, and new backends that have joined since we entered this
385 * function must already have current state, since the caller is
386 * responsible for making sure that the relevant state is entirely visible
387 * before calling this function in the first place. We still have to wake
388 * them up - because we can't distinguish between such backends and older
389 * backends that need to update state - but they won't actually need to
392 for (int i
= NumProcSignalSlots
- 1; i
>= 0; i
--)
394 volatile ProcSignalSlot
*slot
= &ProcSignal
->psh_slot
[i
];
395 pid_t pid
= pg_atomic_read_u32(&slot
->pss_pid
);
399 SpinLockAcquire(&slot
->pss_mutex
);
400 pid
= pg_atomic_read_u32(&slot
->pss_pid
);
403 /* see SendProcSignal for details */
404 slot
->pss_signalFlags
[PROCSIG_BARRIER
] = true;
405 SpinLockRelease(&slot
->pss_mutex
);
409 SpinLockRelease(&slot
->pss_mutex
);
417 * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
418 * requested by a specific call to EmitProcSignalBarrier() have taken effect.
421 WaitForProcSignalBarrier(uint64 generation
)
423 Assert(generation
<= pg_atomic_read_u64(&ProcSignal
->psh_barrierGeneration
));
426 "waiting for all backends to process ProcSignalBarrier generation "
430 for (int i
= NumProcSignalSlots
- 1; i
>= 0; i
--)
432 ProcSignalSlot
*slot
= &ProcSignal
->psh_slot
[i
];
436 * It's important that we check only pss_barrierGeneration here and
437 * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
438 * before the barrier is actually absorbed, but pss_barrierGeneration
439 * is updated only afterward.
441 oldval
= pg_atomic_read_u64(&slot
->pss_barrierGeneration
);
442 while (oldval
< generation
)
444 if (ConditionVariableTimedSleep(&slot
->pss_barrierCV
,
446 WAIT_EVENT_PROC_SIGNAL_BARRIER
))
448 (errmsg("still waiting for backend with PID %d to accept ProcSignalBarrier",
449 (int) pg_atomic_read_u32(&slot
->pss_pid
))));
450 oldval
= pg_atomic_read_u64(&slot
->pss_barrierGeneration
);
452 ConditionVariableCancelSleep();
456 "finished waiting for all backends to process ProcSignalBarrier generation "
461 * The caller is probably calling this function because it wants to read
462 * the shared state or perform further writes to shared state once all
463 * backends are known to have absorbed the barrier. However, the read of
464 * pss_barrierGeneration was performed unlocked; insert a memory barrier
465 * to separate it from whatever follows.
471 * Handle receipt of an interrupt indicating a global barrier event.
473 * All the actual work is deferred to ProcessProcSignalBarrier(), because we
474 * cannot safely access the barrier generation inside the signal handler as
475 * 64bit atomics might use spinlock based emulation, even for reads. As this
476 * routine only gets called when PROCSIG_BARRIER is sent that won't cause a
477 * lot of unnecessary work.
480 HandleProcSignalBarrierInterrupt(void)
482 InterruptPending
= true;
483 ProcSignalBarrierPending
= true;
484 /* latch will be set by procsignal_sigusr1_handler */
488 * Perform global barrier related interrupt checking.
490 * Any backend that participates in ProcSignal signaling must arrange to
491 * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
492 * which is enough for normal backends, but not necessarily for all types of
493 * background processes.
496 ProcessProcSignalBarrier(void)
500 volatile uint32 flags
;
502 Assert(MyProcSignalSlot
);
504 /* Exit quickly if there's no work to do. */
505 if (!ProcSignalBarrierPending
)
507 ProcSignalBarrierPending
= false;
510 * It's not unlikely to process multiple barriers at once, before the
511 * signals for all the barriers have arrived. To avoid unnecessary work in
512 * response to subsequent signals, exit early if we already have processed
515 local_gen
= pg_atomic_read_u64(&MyProcSignalSlot
->pss_barrierGeneration
);
516 shared_gen
= pg_atomic_read_u64(&ProcSignal
->psh_barrierGeneration
);
518 Assert(local_gen
<= shared_gen
);
520 if (local_gen
== shared_gen
)
524 * Get and clear the flags that are set for this backend. Note that
525 * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
526 * read of the barrier generation above happens before we atomically
527 * extract the flags, and that any subsequent state changes happen
530 * NB: In order to avoid race conditions, we must zero
531 * pss_barrierCheckMask first and only afterwards try to do barrier
532 * processing. If we did it in the other order, someone could send us
533 * another barrier of some type right after we called the
534 * barrier-processing function but before we cleared the bit. We would
535 * have no way of knowing that the bit needs to stay set in that case, so
536 * the need to call the barrier-processing function again would just get
537 * forgotten. So instead, we tentatively clear all the bits and then put
538 * back any for which we don't manage to successfully absorb the barrier.
540 flags
= pg_atomic_exchange_u32(&MyProcSignalSlot
->pss_barrierCheckMask
, 0);
543 * If there are no flags set, then we can skip doing any real work.
544 * Otherwise, establish a PG_TRY block, so that we don't lose track of
545 * which types of barrier processing are needed if an ERROR occurs.
554 * Process each type of barrier. The barrier-processing functions
555 * should normally return true, but may return false if the
556 * barrier can't be absorbed at the current time. This should be
557 * rare, because it's pretty expensive. Every single
558 * CHECK_FOR_INTERRUPTS() will return here until we manage to
559 * absorb the barrier, and that cost will add up in a hurry.
561 * NB: It ought to be OK to call the barrier-processing functions
562 * unconditionally, but it's more efficient to call only the ones
563 * that might need us to do something based on the flags.
567 ProcSignalBarrierType type
;
568 bool processed
= true;
570 type
= (ProcSignalBarrierType
) pg_rightmost_one_pos32(flags
);
573 case PROCSIGNAL_BARRIER_SMGRRELEASE
:
574 processed
= ProcessBarrierSmgrRelease();
579 * To avoid an infinite loop, we must always unset the bit in
582 BARRIER_CLEAR_BIT(flags
, type
);
585 * If we failed to process the barrier, reset the shared bit
586 * so we try again later, and set a flag so that we don't bump
591 ResetProcSignalBarrierBits(((uint32
) 1) << type
);
599 * If an ERROR occurred, we'll need to try again later to handle
600 * that barrier type and any others that haven't been handled yet
601 * or weren't successfully absorbed.
603 ResetProcSignalBarrierBits(flags
);
609 * If some barrier types were not successfully absorbed, we will have
610 * to try again later.
617 * State changes related to all types of barriers that might have been
618 * emitted have now been handled, so we can update our notion of the
619 * generation to the one we observed before beginning the updates. If
620 * things have changed further, it'll get fixed up when this function is
623 pg_atomic_write_u64(&MyProcSignalSlot
->pss_barrierGeneration
, shared_gen
);
624 ConditionVariableBroadcast(&MyProcSignalSlot
->pss_barrierCV
);
628 * If it turns out that we couldn't absorb one or more barrier types, either
629 * because the barrier-processing functions returned false or due to an error,
630 * arrange for processing to be retried later.
633 ResetProcSignalBarrierBits(uint32 flags
)
635 pg_atomic_fetch_or_u32(&MyProcSignalSlot
->pss_barrierCheckMask
, flags
);
636 ProcSignalBarrierPending
= true;
637 InterruptPending
= true;
641 * CheckProcSignal - check to see if a particular reason has been
642 * signaled, and clear the signal flag. Should be called after receiving
646 CheckProcSignal(ProcSignalReason reason
)
648 volatile ProcSignalSlot
*slot
= MyProcSignalSlot
;
653 * Careful here --- don't clear flag if we haven't seen it set.
654 * pss_signalFlags is of type "volatile sig_atomic_t" to allow us to
655 * read it here safely, without holding the spinlock.
657 if (slot
->pss_signalFlags
[reason
])
659 slot
->pss_signalFlags
[reason
] = false;
668 * procsignal_sigusr1_handler - handle SIGUSR1 signal.
671 procsignal_sigusr1_handler(SIGNAL_ARGS
)
673 if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT
))
674 HandleCatchupInterrupt();
676 if (CheckProcSignal(PROCSIG_NOTIFY_INTERRUPT
))
677 HandleNotifyInterrupt();
679 if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE
))
680 HandleParallelMessageInterrupt();
682 if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING
))
683 HandleWalSndInitStopping();
685 if (CheckProcSignal(PROCSIG_BARRIER
))
686 HandleProcSignalBarrierInterrupt();
688 if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT
))
689 HandleLogMemoryContextInterrupt();
691 if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE
))
692 HandleParallelApplyMessageInterrupt();
694 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE
))
695 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE
);
697 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_TABLESPACE
))
698 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_TABLESPACE
);
700 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOCK
))
701 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOCK
);
703 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT
))
704 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT
);
706 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT
))
707 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT
);
709 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK
))
710 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK
);
712 if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
))
713 HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
);
719 * Send a query cancellation signal to backend.
721 * Note: This is called from a backend process before authentication. We
722 * cannot take LWLocks yet, but that's OK; we rely on atomic reads of the
723 * fields in the ProcSignal slots.
726 SendCancelRequest(int backendPID
, int32 cancelAuthCode
)
728 Assert(backendPID
!= 0);
731 * See if we have a matching backend. Reading the pss_pid and
732 * pss_cancel_key fields is racy, a backend might die and remove itself
733 * from the array at any time. The probability of the cancellation key
734 * matching wrong process is miniscule, however, so we can live with that.
735 * PIDs are reused too, so sending the signal based on PID is inherently
736 * racy anyway, although OS's avoid reusing PIDs too soon.
738 for (int i
= 0; i
< NumProcSignalSlots
; i
++)
740 ProcSignalSlot
*slot
= &ProcSignal
->psh_slot
[i
];
743 if (pg_atomic_read_u32(&slot
->pss_pid
) != backendPID
)
746 /* Acquire the spinlock and re-check */
747 SpinLockAcquire(&slot
->pss_mutex
);
748 if (pg_atomic_read_u32(&slot
->pss_pid
) != backendPID
)
750 SpinLockRelease(&slot
->pss_mutex
);
755 match
= slot
->pss_cancel_key_valid
&& slot
->pss_cancel_key
== cancelAuthCode
;
757 SpinLockRelease(&slot
->pss_mutex
);
761 /* Found a match; signal that backend to cancel current op */
763 (errmsg_internal("processing cancel request: sending SIGINT to process %d",
767 * If we have setsid(), signal the backend's whole process
771 kill(-backendPID
, SIGINT
);
773 kill(backendPID
, SIGINT
);
778 /* Right PID, wrong key: no way, Jose */
780 (errmsg("wrong key in cancel request for process %d",
787 /* No matching backend */
789 (errmsg("PID %d in cancel request did not match any process",