/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *	  Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif
#include "port/atomics.h"
#include "storage/lwlocknames.h"
#include "storage/proclist_types.h"

struct PGPROC;
/* what wait state a backend waiting for an LWLock is in */
typedef enum LWLockWaitState
{
	LW_WS_NOT_WAITING,			/* not currently waiting / woken up */
	LW_WS_WAITING,				/* currently waiting */
	LW_WS_PENDING_WAKEUP,		/* removed from waitlist, but not yet
								 * signalled */
} LWLockWaitState;
/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
	uint16		tranche;		/* tranche ID */
	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
	proclist_head waiters;		/* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
	pg_atomic_uint32 nwaiters;	/* number of waiters */
	struct PGPROC *owner;		/* last exclusive owner of the lock */
#endif
} LWLock;
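
/*
 * Illustrative sketch only (MySharedState and its fields are hypothetical):
 * an LWLock embedded in a larger shared-memory structure, as the comment
 * above allows, with the lock protecting the fields that follow it:
 *
 *		typedef struct MySharedState
 *		{
 *			LWLock		lock;
 *			int			counter;
 *		} MySharedState;
 */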
/*
 * In most cases, it's desirable to force each tranche of LWLocks to be
 * aligned on a cache line boundary and make the array stride a power of 2.
 * This saves a few cycles in indexing, but more importantly ensures that
 * individual LWLocks don't cross cache line boundaries.  This reduces cache
 * contention problems, especially on AMD Opterons.  In some cases, it's
 * useful to add even more padding so that each LWLock takes up an entire
 * cache line; this is useful, for example, in the main LWLock array, where
 * the overall number of locks is small but some are heavily contended.
 */
#define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE
StaticAssertDecl(sizeof(LWLock) <= LWLOCK_PADDED_SIZE,
				 "Miscalculated LWLock padding");
/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
	int			trancheId;
	char	   *trancheName;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;
/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */
/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET		\
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
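
/*
 * Illustrative sketch only: how callers map a hash code to its partition
 * lock within MainLWLockArray using the offsets above.  buf_internals.h
 * and lock.h define macros along these lines; the exact form shown here
 * is paraphrased, not a verbatim copy:
 *
 *		#define BufMappingPartitionLock(hashcode) \
 *			(&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + \
 *				(hashcode) % NUM_BUFFER_PARTITIONS].lock)
 */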
typedef enum LWLockMode
{
	LW_EXCLUSIVE,
	LW_SHARED,
	LW_WAIT_UNTIL_FREE,			/* A special mode used in PGPROC->lwWaitMode,
								 * when waiting for lock to become free. Not
								 * to be used as LWLockAcquire argument */
} LWLockMode;
#ifdef LOCK_DEBUG
extern PGDLLIMPORT bool Trace_lwlocks;
#endif
extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr,
								  uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
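
/*
 * Illustrative sketch only ("state" is a hypothetical MySharedState *):
 * the canonical acquire/release pairing.  Writers take LW_EXCLUSIVE;
 * readers can hold the lock concurrently with LW_SHARED.
 *
 *		LWLockAcquire(&state->lock, LW_EXCLUSIVE);
 *		state->counter++;
 *		LWLockRelease(&state->lock);
 *
 *		LWLockAcquire(&state->lock, LW_SHARED);
 *		value = state->counter;
 *		LWLockRelease(&state->lock);
 */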
extern bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr,
							 uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr,
							uint64 val);
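
/*
 * Illustrative sketch only ("insertingAt" is a hypothetical progress
 * variable): the holder publishes progress through LWLockUpdateVar()
 * while holding the lock exclusively, and a waiter blocks until either
 * the lock becomes free (return value true) or the value changes from
 * what it last saw:
 *
 *		LWLockUpdateVar(&state->lock, &state->insertingAt, new_pos);
 *
 *		uint64	seen;
 *		if (LWLockWaitForVar(&state->lock, &state->insertingAt,
 *							 last_seen, &seen))
 *			... the lock was free ...
 */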
extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);
extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);
/*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array containing
 * the requested number of LWLocks.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
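
/*
 * Illustrative sketch only ("my_extension" and my_request_locks() are
 * hypothetical): an extension typically requests its locks during startup
 * and looks them up once shared memory has been initialized.
 *
 *		static void
 *		my_request_locks(void)
 *		{
 *			RequestNamedLWLockTranche("my_extension", 4);
 *		}
 *
 *		LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 *
 *		LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 */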
/*
 * There is another, more flexible method of obtaining lwlocks.  First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter.  Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with a name.
 * Finally, LWLockInitialize should be called just once per lwlock, passing
 * the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int	LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
extern void LWLockInitialize(LWLock *lock, int tranche_id);
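
/*
 * Illustrative sketch only ("shared" and "my_tranche" are hypothetical):
 * the three steps above, for an LWLock living in a DSM segment.  The
 * creating process runs:
 *
 *		int		tranche_id = LWLockNewTrancheId();
 *		LWLockInitialize(&shared->lock, tranche_id);
 *
 * and every process that will use the lock (including the creator) runs:
 *
 *		LWLockRegisterTranche(tranche_id, "my_tranche");
 */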
/*
 * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
 * we reserve additional tranche IDs for builtin tranches not included in
 * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
 * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
	LWTRANCHE_XACT_BUFFER = NUM_INDIVIDUAL_LWLOCKS,
	LWTRANCHE_COMMITTS_BUFFER,
	LWTRANCHE_SUBTRANS_BUFFER,
	LWTRANCHE_MULTIXACTOFFSET_BUFFER,
	LWTRANCHE_MULTIXACTMEMBER_BUFFER,
	LWTRANCHE_NOTIFY_BUFFER,
	LWTRANCHE_SERIAL_BUFFER,
	LWTRANCHE_WAL_INSERT,
	LWTRANCHE_BUFFER_CONTENT,
	LWTRANCHE_REPLICATION_ORIGIN_STATE,
	LWTRANCHE_REPLICATION_SLOT_IO,
	LWTRANCHE_LOCK_FASTPATH,
	LWTRANCHE_BUFFER_MAPPING,
	LWTRANCHE_LOCK_MANAGER,
	LWTRANCHE_PREDICATE_LOCK_MANAGER,
	LWTRANCHE_PARALLEL_HASH_JOIN,
	LWTRANCHE_PARALLEL_QUERY_DSA,
	LWTRANCHE_PER_SESSION_DSA,
	LWTRANCHE_PER_SESSION_RECORD_TYPE,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
	LWTRANCHE_SHARED_TUPLESTORE,
	LWTRANCHE_SHARED_TIDBITMAP,
	LWTRANCHE_PARALLEL_APPEND,
	LWTRANCHE_PER_XACT_PREDICATE_LIST,
	LWTRANCHE_PGSTATS_DSA,
	LWTRANCHE_PGSTATS_HASH,
	LWTRANCHE_PGSTATS_DATA,
	LWTRANCHE_LAUNCHER_DSA,
	LWTRANCHE_LAUNCHER_HASH,
	LWTRANCHE_DSM_REGISTRY_DSA,
	LWTRANCHE_DSM_REGISTRY_HASH,
	LWTRANCHE_COMMITTS_SLRU,
	LWTRANCHE_MULTIXACTMEMBER_SLRU,
	LWTRANCHE_MULTIXACTOFFSET_SLRU,
	LWTRANCHE_NOTIFY_SLRU,
	LWTRANCHE_SERIAL_SLRU,
	LWTRANCHE_SUBTRANS_SLRU,
	LWTRANCHE_XACT_SLRU,
	LWTRANCHE_PARALLEL_VACUUM_DSA,
	LWTRANCHE_FIRST_USER_DEFINED,
} BuiltinTrancheIds;
/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks.  New code should instead use LWLock *.  However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;
#endif							/* LWLOCK_H */