Move routines to manipulate WAL into PostgreSQL::Test::Cluster
[pgsql.git] / src / backend / commands / trigger.c
blobacf3e4a3f1f7de95cf909ea5e4731201d4fba7e2
1 /*-------------------------------------------------------------------------
3 * trigger.c
4 * PostgreSQL TRIGGERs support code.
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
9 * IDENTIFICATION
10 * src/backend/commands/trigger.c
12 *-------------------------------------------------------------------------
14 #include "postgres.h"
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/indexing.h"
26 #include "catalog/objectaccess.h"
27 #include "catalog/partition.h"
28 #include "catalog/pg_constraint.h"
29 #include "catalog/pg_inherits.h"
30 #include "catalog/pg_proc.h"
31 #include "catalog/pg_trigger.h"
32 #include "catalog/pg_type.h"
33 #include "commands/dbcommands.h"
34 #include "commands/trigger.h"
35 #include "executor/executor.h"
36 #include "miscadmin.h"
37 #include "nodes/bitmapset.h"
38 #include "nodes/makefuncs.h"
39 #include "optimizer/optimizer.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parse_collate.h"
42 #include "parser/parse_func.h"
43 #include "parser/parse_relation.h"
44 #include "partitioning/partdesc.h"
45 #include "pgstat.h"
46 #include "rewrite/rewriteManip.h"
47 #include "storage/lmgr.h"
48 #include "utils/acl.h"
49 #include "utils/builtins.h"
50 #include "utils/fmgroids.h"
51 #include "utils/guc_hooks.h"
52 #include "utils/inval.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/plancache.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tuplestore.h"
62 /* GUC variables */
63 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
65 /* How many levels deep into trigger execution are we? */
66 static int MyTriggerDepth = 0;
68 /* Local function prototypes */
69 static void renametrig_internal(Relation tgrel, Relation targetrel,
70 HeapTuple trigtup, const char *newname,
71 const char *expected_name);
72 static void renametrig_partition(Relation tgrel, Oid partitionId,
73 Oid parentTriggerOid, const char *newname,
74 const char *expected_name);
75 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
76 static bool GetTupleForTrigger(EState *estate,
77 EPQState *epqstate,
78 ResultRelInfo *relinfo,
79 ItemPointer tid,
80 LockTupleMode lockmode,
81 TupleTableSlot *oldslot,
82 TupleTableSlot **epqslot,
83 TM_Result *tmresultp,
84 TM_FailureData *tmfdp);
85 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
86 Trigger *trigger, TriggerEvent event,
87 Bitmapset *modifiedCols,
88 TupleTableSlot *oldslot, TupleTableSlot *newslot);
89 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
90 int tgindx,
91 FmgrInfo *finfo,
92 Instrumentation *instr,
93 MemoryContext per_tuple_context);
94 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
95 ResultRelInfo *src_partinfo,
96 ResultRelInfo *dst_partinfo,
97 int event, bool row_trigger,
98 TupleTableSlot *oldslot, TupleTableSlot *newslot,
99 List *recheckIndexes, Bitmapset *modifiedCols,
100 TransitionCaptureState *transition_capture,
101 bool is_crosspart_update);
102 static void AfterTriggerEnlargeQueryState(void);
103 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
107 * Create a trigger. Returns the address of the created trigger.
109 * queryString is the source text of the CREATE TRIGGER command.
110 * This must be supplied if a whenClause is specified, else it can be NULL.
112 * relOid, if nonzero, is the relation on which the trigger should be
113 * created. If zero, the name provided in the statement will be looked up.
115 * refRelOid, if nonzero, is the relation to which the constraint trigger
116 * refers. If zero, the constraint relation name provided in the statement
117 * will be looked up as needed.
119 * constraintOid, if nonzero, says that this trigger is being created
120 * internally to implement that constraint. A suitable pg_depend entry will
121 * be made to link the trigger to that constraint. constraintOid is zero when
122 * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123 * TRIGGER, we build a pg_constraint entry internally.)
125 * indexOid, if nonzero, is the OID of an index associated with the constraint.
126 * We do nothing with this except store it into pg_trigger.tgconstrindid;
127 * but when creating a trigger for a deferrable unique constraint on a
128 * partitioned table, its children are looked up. Note we don't cope with
129 * invalid indexes in that case.
131 * funcoid, if nonzero, is the OID of the function to invoke. When this is
132 * given, stmt->funcname is ignored.
134 * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
135 * if that trigger is dropped, this one should be too. There are two cases
136 * when a nonzero value is passed for this: 1) when this function recurses to
137 * create the trigger on partitions, 2) when creating child foreign key
138 * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
140 * If whenClause is passed, it is an already-transformed expression for
141 * WHEN. In this case, we ignore any that may come in stmt->whenClause.
143 * If isInternal is true then this is an internally-generated trigger.
144 * This argument sets the tgisinternal field of the pg_trigger entry, and
145 * if true causes us to modify the given trigger name to ensure uniqueness.
147 * When isInternal is not true we require ACL_TRIGGER permissions on the
148 * relation, as well as ACL_EXECUTE on the trigger function. For internal
149 * triggers the caller must apply any required permission checks.
151 * When called on partitioned tables, this function recurses to create the
152 * trigger on all the partitions, except if isInternal is true, in which
153 * case caller is expected to execute recursion on its own. in_partition
154 * indicates such a recursive call; outside callers should pass "false"
155 * (but see CloneRowTriggersToPartition).
157 ObjectAddress
158 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159 Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160 Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161 bool isInternal, bool in_partition)
163 return
164 CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
165 constraintOid, indexOid, funcoid,
166 parentTriggerOid, whenClause, isInternal,
167 in_partition, TRIGGER_FIRES_ON_ORIGIN);
171 * Like the above; additionally the firing condition
172 * (always/origin/replica/disabled) can be specified.
174 ObjectAddress
175 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
176 Oid relOid, Oid refRelOid, Oid constraintOid,
177 Oid indexOid, Oid funcoid, Oid parentTriggerOid,
178 Node *whenClause, bool isInternal, bool in_partition,
179 char trigger_fires_when)
181 int16 tgtype;
182 int ncolumns;
183 int16 *columns;
184 int2vector *tgattr;
185 List *whenRtable;
186 char *qual;
187 Datum values[Natts_pg_trigger];
188 bool nulls[Natts_pg_trigger];
189 Relation rel;
190 AclResult aclresult;
191 Relation tgrel;
192 Relation pgrel;
193 HeapTuple tuple = NULL;
194 Oid funcrettype;
195 Oid trigoid = InvalidOid;
196 char internaltrigname[NAMEDATALEN];
197 char *trigname;
198 Oid constrrelid = InvalidOid;
199 ObjectAddress myself,
200 referenced;
201 char *oldtablename = NULL;
202 char *newtablename = NULL;
203 bool partition_recurse;
204 bool trigger_exists = false;
205 Oid existing_constraint_oid = InvalidOid;
206 bool existing_isInternal = false;
207 bool existing_isClone = false;
209 if (OidIsValid(relOid))
210 rel = table_open(relOid, ShareRowExclusiveLock);
211 else
212 rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
215 * Triggers must be on tables or views, and there are additional
216 * relation-type-specific restrictions.
218 if (rel->rd_rel->relkind == RELKIND_RELATION)
220 /* Tables can't have INSTEAD OF triggers */
221 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
222 stmt->timing != TRIGGER_TYPE_AFTER)
223 ereport(ERROR,
224 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
225 errmsg("\"%s\" is a table",
226 RelationGetRelationName(rel)),
227 errdetail("Tables cannot have INSTEAD OF triggers.")));
229 else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
231 /* Partitioned tables can't have INSTEAD OF triggers */
232 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
233 stmt->timing != TRIGGER_TYPE_AFTER)
234 ereport(ERROR,
235 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
236 errmsg("\"%s\" is a table",
237 RelationGetRelationName(rel)),
238 errdetail("Tables cannot have INSTEAD OF triggers.")));
241 * FOR EACH ROW triggers have further restrictions
243 if (stmt->row)
246 * Disallow use of transition tables.
248 * Note that we have another restriction about transition tables
249 * in partitions; search for 'has_superclass' below for an
250 * explanation. The check here is just to protect from the fact
251 * that if we allowed it here, the creation would succeed for a
252 * partitioned table with no partitions, but would be blocked by
253 * the other restriction when the first partition was created,
254 * which is very unfriendly behavior.
256 if (stmt->transitionRels != NIL)
257 ereport(ERROR,
258 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
259 errmsg("\"%s\" is a partitioned table",
260 RelationGetRelationName(rel)),
261 errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
264 else if (rel->rd_rel->relkind == RELKIND_VIEW)
267 * Views can have INSTEAD OF triggers (which we check below are
268 * row-level), or statement-level BEFORE/AFTER triggers.
270 if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
271 ereport(ERROR,
272 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
273 errmsg("\"%s\" is a view",
274 RelationGetRelationName(rel)),
275 errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
276 /* Disallow TRUNCATE triggers on VIEWs */
277 if (TRIGGER_FOR_TRUNCATE(stmt->events))
278 ereport(ERROR,
279 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280 errmsg("\"%s\" is a view",
281 RelationGetRelationName(rel)),
282 errdetail("Views cannot have TRUNCATE triggers.")));
284 else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
286 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
287 stmt->timing != TRIGGER_TYPE_AFTER)
288 ereport(ERROR,
289 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
290 errmsg("\"%s\" is a foreign table",
291 RelationGetRelationName(rel)),
292 errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
295 * We disallow constraint triggers to protect the assumption that
296 * triggers on FKs can't be deferred. See notes with AfterTriggers
297 * data structures, below.
299 if (stmt->isconstraint)
300 ereport(ERROR,
301 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
302 errmsg("\"%s\" is a foreign table",
303 RelationGetRelationName(rel)),
304 errdetail("Foreign tables cannot have constraint triggers.")));
306 else
307 ereport(ERROR,
308 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309 errmsg("relation \"%s\" cannot have triggers",
310 RelationGetRelationName(rel)),
311 errdetail_relkind_not_supported(rel->rd_rel->relkind)));
313 if (!allowSystemTableMods && IsSystemRelation(rel))
314 ereport(ERROR,
315 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
316 errmsg("permission denied: \"%s\" is a system catalog",
317 RelationGetRelationName(rel))));
319 if (stmt->isconstraint)
322 * We must take a lock on the target relation to protect against
323 * concurrent drop. It's not clear that AccessShareLock is strong
324 * enough, but we certainly need at least that much... otherwise, we
325 * might end up creating a pg_constraint entry referencing a
326 * nonexistent table.
328 if (OidIsValid(refRelOid))
330 LockRelationOid(refRelOid, AccessShareLock);
331 constrrelid = refRelOid;
333 else if (stmt->constrrel != NULL)
334 constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
335 false);
338 /* permission checks */
339 if (!isInternal)
341 aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
342 ACL_TRIGGER);
343 if (aclresult != ACLCHECK_OK)
344 aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
345 RelationGetRelationName(rel));
347 if (OidIsValid(constrrelid))
349 aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
350 ACL_TRIGGER);
351 if (aclresult != ACLCHECK_OK)
352 aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
353 get_rel_name(constrrelid));
358 * When called on a partitioned table to create a FOR EACH ROW trigger
359 * that's not internal, we create one trigger for each partition, too.
361 * For that, we'd better hold lock on all of them ahead of time.
363 partition_recurse = !isInternal && stmt->row &&
364 rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
365 if (partition_recurse)
366 list_free(find_all_inheritors(RelationGetRelid(rel),
367 ShareRowExclusiveLock, NULL));
369 /* Compute tgtype */
370 TRIGGER_CLEAR_TYPE(tgtype);
371 if (stmt->row)
372 TRIGGER_SETT_ROW(tgtype);
373 tgtype |= stmt->timing;
374 tgtype |= stmt->events;
376 /* Disallow ROW-level TRUNCATE triggers */
377 if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
378 ereport(ERROR,
379 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
380 errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
382 /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
383 if (TRIGGER_FOR_INSTEAD(tgtype))
385 if (!TRIGGER_FOR_ROW(tgtype))
386 ereport(ERROR,
387 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
388 errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
389 if (stmt->whenClause)
390 ereport(ERROR,
391 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392 errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
393 if (stmt->columns != NIL)
394 ereport(ERROR,
395 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396 errmsg("INSTEAD OF triggers cannot have column lists")));
400 * We don't yet support naming ROW transition variables, but the parser
401 * recognizes the syntax so we can give a nicer message here.
403 * Per standard, REFERENCING TABLE names are only allowed on AFTER
404 * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
405 * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
406 * only allowed once. Per standard, OLD may not be specified when
407 * creating a trigger only for INSERT, and NEW may not be specified when
408 * creating a trigger only for DELETE.
410 * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
411 * reference both ROW and TABLE transition data.
413 if (stmt->transitionRels != NIL)
415 List *varList = stmt->transitionRels;
416 ListCell *lc;
418 foreach(lc, varList)
420 TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
422 if (!(tt->isTable))
423 ereport(ERROR,
424 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425 errmsg("ROW variable naming in the REFERENCING clause is not supported"),
426 errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
429 * Because of the above test, we omit further ROW-related testing
430 * below. If we later allow naming OLD and NEW ROW variables,
431 * adjustments will be needed below.
434 if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
435 ereport(ERROR,
436 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
437 errmsg("\"%s\" is a foreign table",
438 RelationGetRelationName(rel)),
439 errdetail("Triggers on foreign tables cannot have transition tables.")));
441 if (rel->rd_rel->relkind == RELKIND_VIEW)
442 ereport(ERROR,
443 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444 errmsg("\"%s\" is a view",
445 RelationGetRelationName(rel)),
446 errdetail("Triggers on views cannot have transition tables.")));
449 * We currently don't allow row-level triggers with transition
450 * tables on partition or inheritance children. Such triggers
451 * would somehow need to see tuples converted to the format of the
452 * table they're attached to, and it's not clear which subset of
453 * tuples each child should see. See also the prohibitions in
454 * ATExecAttachPartition() and ATExecAddInherit().
456 if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
458 /* Use appropriate error message. */
459 if (rel->rd_rel->relispartition)
460 ereport(ERROR,
461 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462 errmsg("ROW triggers with transition tables are not supported on partitions")));
463 else
464 ereport(ERROR,
465 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466 errmsg("ROW triggers with transition tables are not supported on inheritance children")));
469 if (stmt->timing != TRIGGER_TYPE_AFTER)
470 ereport(ERROR,
471 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
472 errmsg("transition table name can only be specified for an AFTER trigger")));
474 if (TRIGGER_FOR_TRUNCATE(tgtype))
475 ereport(ERROR,
476 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477 errmsg("TRUNCATE triggers with transition tables are not supported")));
480 * We currently don't allow multi-event triggers ("INSERT OR
481 * UPDATE") with transition tables, because it's not clear how to
482 * handle INSERT ... ON CONFLICT statements which can fire both
483 * INSERT and UPDATE triggers. We show the inserted tuples to
484 * INSERT triggers and the updated tuples to UPDATE triggers, but
485 * it's not yet clear what INSERT OR UPDATE trigger should see.
486 * This restriction could be lifted if we can decide on the right
487 * semantics in a later release.
489 if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
490 (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
491 (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
492 ereport(ERROR,
493 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
494 errmsg("transition tables cannot be specified for triggers with more than one event")));
497 * We currently don't allow column-specific triggers with
498 * transition tables. Per spec, that seems to require
499 * accumulating separate transition tables for each combination of
500 * columns, which is a lot of work for a rather marginal feature.
502 if (stmt->columns != NIL)
503 ereport(ERROR,
504 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505 errmsg("transition tables cannot be specified for triggers with column lists")));
508 * We disallow constraint triggers with transition tables, to
509 * protect the assumption that such triggers can't be deferred.
510 * See notes with AfterTriggers data structures, below.
512 * Currently this is enforced by the grammar, so just Assert here.
514 Assert(!stmt->isconstraint);
516 if (tt->isNew)
518 if (!(TRIGGER_FOR_INSERT(tgtype) ||
519 TRIGGER_FOR_UPDATE(tgtype)))
520 ereport(ERROR,
521 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522 errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
524 if (newtablename != NULL)
525 ereport(ERROR,
526 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527 errmsg("NEW TABLE cannot be specified multiple times")));
529 newtablename = tt->name;
531 else
533 if (!(TRIGGER_FOR_DELETE(tgtype) ||
534 TRIGGER_FOR_UPDATE(tgtype)))
535 ereport(ERROR,
536 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537 errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
539 if (oldtablename != NULL)
540 ereport(ERROR,
541 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
542 errmsg("OLD TABLE cannot be specified multiple times")));
544 oldtablename = tt->name;
548 if (newtablename != NULL && oldtablename != NULL &&
549 strcmp(newtablename, oldtablename) == 0)
550 ereport(ERROR,
551 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
552 errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
556 * Parse the WHEN clause, if any and we weren't passed an already
557 * transformed one.
559 * Note that as a side effect, we fill whenRtable when parsing. If we got
560 * an already parsed clause, this does not occur, which is what we want --
561 * no point in adding redundant dependencies below.
563 if (!whenClause && stmt->whenClause)
565 ParseState *pstate;
566 ParseNamespaceItem *nsitem;
567 List *varList;
568 ListCell *lc;
570 /* Set up a pstate to parse with */
571 pstate = make_parsestate(NULL);
572 pstate->p_sourcetext = queryString;
575 * Set up nsitems for OLD and NEW references.
577 * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
579 nsitem = addRangeTableEntryForRelation(pstate, rel,
580 AccessShareLock,
581 makeAlias("old", NIL),
582 false, false);
583 addNSItemToQuery(pstate, nsitem, false, true, true);
584 nsitem = addRangeTableEntryForRelation(pstate, rel,
585 AccessShareLock,
586 makeAlias("new", NIL),
587 false, false);
588 addNSItemToQuery(pstate, nsitem, false, true, true);
590 /* Transform expression. Copy to be sure we don't modify original */
591 whenClause = transformWhereClause(pstate,
592 copyObject(stmt->whenClause),
593 EXPR_KIND_TRIGGER_WHEN,
594 "WHEN");
595 /* we have to fix its collations too */
596 assign_expr_collations(pstate, whenClause);
599 * Check for disallowed references to OLD/NEW.
601 * NB: pull_var_clause is okay here only because we don't allow
602 * subselects in WHEN clauses; it would fail to examine the contents
603 * of subselects.
605 varList = pull_var_clause(whenClause, 0);
606 foreach(lc, varList)
608 Var *var = (Var *) lfirst(lc);
610 switch (var->varno)
612 case PRS2_OLD_VARNO:
613 if (!TRIGGER_FOR_ROW(tgtype))
614 ereport(ERROR,
615 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616 errmsg("statement trigger's WHEN condition cannot reference column values"),
617 parser_errposition(pstate, var->location)));
618 if (TRIGGER_FOR_INSERT(tgtype))
619 ereport(ERROR,
620 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
621 errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
622 parser_errposition(pstate, var->location)));
623 /* system columns are okay here */
624 break;
625 case PRS2_NEW_VARNO:
626 if (!TRIGGER_FOR_ROW(tgtype))
627 ereport(ERROR,
628 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
629 errmsg("statement trigger's WHEN condition cannot reference column values"),
630 parser_errposition(pstate, var->location)));
631 if (TRIGGER_FOR_DELETE(tgtype))
632 ereport(ERROR,
633 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
634 errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
635 parser_errposition(pstate, var->location)));
636 if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
637 ereport(ERROR,
638 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
639 errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
640 parser_errposition(pstate, var->location)));
641 if (TRIGGER_FOR_BEFORE(tgtype) &&
642 var->varattno == 0 &&
643 RelationGetDescr(rel)->constr &&
644 RelationGetDescr(rel)->constr->has_generated_stored)
645 ereport(ERROR,
646 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
647 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
648 errdetail("A whole-row reference is used and the table contains generated columns."),
649 parser_errposition(pstate, var->location)));
650 if (TRIGGER_FOR_BEFORE(tgtype) &&
651 var->varattno > 0 &&
652 TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
653 ereport(ERROR,
654 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
655 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
656 errdetail("Column \"%s\" is a generated column.",
657 NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
658 parser_errposition(pstate, var->location)));
659 break;
660 default:
661 /* can't happen without add_missing_from, so just elog */
662 elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
663 break;
667 /* we'll need the rtable for recordDependencyOnExpr */
668 whenRtable = pstate->p_rtable;
670 qual = nodeToString(whenClause);
672 free_parsestate(pstate);
674 else if (!whenClause)
676 whenClause = NULL;
677 whenRtable = NIL;
678 qual = NULL;
680 else
682 qual = nodeToString(whenClause);
683 whenRtable = NIL;
687 * Find and validate the trigger function.
689 if (!OidIsValid(funcoid))
690 funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
691 if (!isInternal)
693 aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
694 if (aclresult != ACLCHECK_OK)
695 aclcheck_error(aclresult, OBJECT_FUNCTION,
696 NameListToString(stmt->funcname));
698 funcrettype = get_func_rettype(funcoid);
699 if (funcrettype != TRIGGEROID)
700 ereport(ERROR,
701 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
702 errmsg("function %s must return type %s",
703 NameListToString(stmt->funcname), "trigger")));
706 * Scan pg_trigger to see if there is already a trigger of the same name.
707 * Skip this for internally generated triggers, since we'll modify the
708 * name to be unique below.
710 * NOTE that this is cool only because we have ShareRowExclusiveLock on
711 * the relation, so the trigger set won't be changing underneath us.
713 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
714 if (!isInternal)
716 ScanKeyData skeys[2];
717 SysScanDesc tgscan;
719 ScanKeyInit(&skeys[0],
720 Anum_pg_trigger_tgrelid,
721 BTEqualStrategyNumber, F_OIDEQ,
722 ObjectIdGetDatum(RelationGetRelid(rel)));
724 ScanKeyInit(&skeys[1],
725 Anum_pg_trigger_tgname,
726 BTEqualStrategyNumber, F_NAMEEQ,
727 CStringGetDatum(stmt->trigname));
729 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
730 NULL, 2, skeys);
732 /* There should be at most one matching tuple */
733 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
735 Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
737 trigoid = oldtrigger->oid;
738 existing_constraint_oid = oldtrigger->tgconstraint;
739 existing_isInternal = oldtrigger->tgisinternal;
740 existing_isClone = OidIsValid(oldtrigger->tgparentid);
741 trigger_exists = true;
742 /* copy the tuple to use in CatalogTupleUpdate() */
743 tuple = heap_copytuple(tuple);
745 systable_endscan(tgscan);
748 if (!trigger_exists)
750 /* Generate the OID for the new trigger. */
751 trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
752 Anum_pg_trigger_oid);
754 else
757 * If OR REPLACE was specified, we'll replace the old trigger;
758 * otherwise complain about the duplicate name.
760 if (!stmt->replace)
761 ereport(ERROR,
762 (errcode(ERRCODE_DUPLICATE_OBJECT),
763 errmsg("trigger \"%s\" for relation \"%s\" already exists",
764 stmt->trigname, RelationGetRelationName(rel))));
767 * An internal trigger or a child trigger (isClone) cannot be replaced
768 * by a user-defined trigger. However, skip this test when
769 * in_partition, because then we're recursing from a partitioned table
770 * and the check was made at the parent level.
772 if ((existing_isInternal || existing_isClone) &&
773 !isInternal && !in_partition)
774 ereport(ERROR,
775 (errcode(ERRCODE_DUPLICATE_OBJECT),
776 errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
777 stmt->trigname, RelationGetRelationName(rel))));
780 * It is not allowed to replace with a constraint trigger; gram.y
781 * should have enforced this already.
783 Assert(!stmt->isconstraint);
786 * It is not allowed to replace an existing constraint trigger,
787 * either. (The reason for these restrictions is partly that it seems
788 * difficult to deal with pending trigger events in such cases, and
789 * partly that the command might imply changing the constraint's
790 * properties as well, which doesn't seem nice.)
792 if (OidIsValid(existing_constraint_oid))
793 ereport(ERROR,
794 (errcode(ERRCODE_DUPLICATE_OBJECT),
795 errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
796 stmt->trigname, RelationGetRelationName(rel))));
800 * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
801 * corresponding pg_constraint entry.
803 if (stmt->isconstraint && !OidIsValid(constraintOid))
805 /* Internal callers should have made their own constraints */
806 Assert(!isInternal);
807 constraintOid = CreateConstraintEntry(stmt->trigname,
808 RelationGetNamespace(rel),
809 CONSTRAINT_TRIGGER,
810 stmt->deferrable,
811 stmt->initdeferred,
812 true, /* Is Enforced */
813 true,
814 InvalidOid, /* no parent */
815 RelationGetRelid(rel),
816 NULL, /* no conkey */
819 InvalidOid, /* no domain */
820 InvalidOid, /* no index */
821 InvalidOid, /* no foreign key */
822 NULL,
823 NULL,
824 NULL,
825 NULL,
827 ' ',
828 ' ',
829 NULL,
831 ' ',
832 NULL, /* no exclusion */
833 NULL, /* no check constraint */
834 NULL,
835 true, /* islocal */
836 0, /* inhcount */
837 true, /* noinherit */
838 false, /* conperiod */
839 isInternal); /* is_internal */
843 * If trigger is internally generated, modify the provided trigger name to
844 * ensure uniqueness by appending the trigger OID. (Callers will usually
845 * supply a simple constant trigger name in these cases.)
847 if (isInternal)
849 snprintf(internaltrigname, sizeof(internaltrigname),
850 "%s_%u", stmt->trigname, trigoid);
851 trigname = internaltrigname;
853 else
855 /* user-defined trigger; use the specified trigger name as-is */
856 trigname = stmt->trigname;
860 * Build the new pg_trigger tuple.
862 memset(nulls, false, sizeof(nulls));
864 values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
865 values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
866 values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
867 values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
868 CStringGetDatum(trigname));
869 values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
870 values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
871 values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
872 values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
873 values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
874 values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
875 values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
876 values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
877 values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
879 if (stmt->args)
881 ListCell *le;
882 char *args;
883 int16 nargs = list_length(stmt->args);
884 int len = 0;
886 foreach(le, stmt->args)
888 char *ar = strVal(lfirst(le));
890 len += strlen(ar) + 4;
891 for (; *ar; ar++)
893 if (*ar == '\\')
894 len++;
897 args = (char *) palloc(len + 1);
898 args[0] = '\0';
899 foreach(le, stmt->args)
901 char *s = strVal(lfirst(le));
902 char *d = args + strlen(args);
904 while (*s)
906 if (*s == '\\')
907 *d++ = '\\';
908 *d++ = *s++;
910 strcpy(d, "\\000");
912 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
913 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
914 CStringGetDatum(args));
916 else
918 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
919 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
920 CStringGetDatum(""));
923 /* build column number array if it's a column-specific trigger */
924 ncolumns = list_length(stmt->columns);
925 if (ncolumns == 0)
926 columns = NULL;
927 else
929 ListCell *cell;
930 int i = 0;
932 columns = (int16 *) palloc(ncolumns * sizeof(int16));
933 foreach(cell, stmt->columns)
935 char *name = strVal(lfirst(cell));
936 int16 attnum;
937 int j;
939 /* Lookup column name. System columns are not allowed */
940 attnum = attnameAttNum(rel, name, false);
941 if (attnum == InvalidAttrNumber)
942 ereport(ERROR,
943 (errcode(ERRCODE_UNDEFINED_COLUMN),
944 errmsg("column \"%s\" of relation \"%s\" does not exist",
945 name, RelationGetRelationName(rel))));
947 /* Check for duplicates */
948 for (j = i - 1; j >= 0; j--)
950 if (columns[j] == attnum)
951 ereport(ERROR,
952 (errcode(ERRCODE_DUPLICATE_COLUMN),
953 errmsg("column \"%s\" specified more than once",
954 name)));
957 columns[i++] = attnum;
960 tgattr = buildint2vector(columns, ncolumns);
961 values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
963 /* set tgqual if trigger has WHEN clause */
964 if (qual)
965 values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
966 else
967 nulls[Anum_pg_trigger_tgqual - 1] = true;
969 if (oldtablename)
970 values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
971 CStringGetDatum(oldtablename));
972 else
973 nulls[Anum_pg_trigger_tgoldtable - 1] = true;
974 if (newtablename)
975 values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
976 CStringGetDatum(newtablename));
977 else
978 nulls[Anum_pg_trigger_tgnewtable - 1] = true;
981 * Insert or replace tuple in pg_trigger.
983 if (!trigger_exists)
985 tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
986 CatalogTupleInsert(tgrel, tuple);
988 else
990 HeapTuple newtup;
992 newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
993 CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
994 heap_freetuple(newtup);
997 heap_freetuple(tuple); /* free either original or new tuple */
998 table_close(tgrel, RowExclusiveLock);
1000 pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1001 pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1002 pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1003 if (oldtablename)
1004 pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1005 if (newtablename)
1006 pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1009 * Update relation's pg_class entry; if necessary; and if not, send an SI
1010 * message to make other backends (and this one) rebuild relcache entries.
1012 pgrel = table_open(RelationRelationId, RowExclusiveLock);
1013 tuple = SearchSysCacheCopy1(RELOID,
1014 ObjectIdGetDatum(RelationGetRelid(rel)));
1015 if (!HeapTupleIsValid(tuple))
1016 elog(ERROR, "cache lookup failed for relation %u",
1017 RelationGetRelid(rel));
1018 if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1020 ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1022 CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1024 CommandCounterIncrement();
1026 else
1027 CacheInvalidateRelcacheByTuple(tuple);
1029 heap_freetuple(tuple);
1030 table_close(pgrel, RowExclusiveLock);
1033 * If we're replacing a trigger, flush all the old dependencies before
1034 * recording new ones.
1036 if (trigger_exists)
1037 deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1040 * Record dependencies for trigger. Always place a normal dependency on
1041 * the function.
1043 myself.classId = TriggerRelationId;
1044 myself.objectId = trigoid;
1045 myself.objectSubId = 0;
1047 referenced.classId = ProcedureRelationId;
1048 referenced.objectId = funcoid;
1049 referenced.objectSubId = 0;
1050 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1052 if (isInternal && OidIsValid(constraintOid))
1055 * Internally-generated trigger for a constraint, so make it an
1056 * internal dependency of the constraint. We can skip depending on
1057 * the relation(s), as there'll be an indirect dependency via the
1058 * constraint.
1060 referenced.classId = ConstraintRelationId;
1061 referenced.objectId = constraintOid;
1062 referenced.objectSubId = 0;
1063 recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1065 else
1068 * User CREATE TRIGGER, so place dependencies. We make trigger be
1069 * auto-dropped if its relation is dropped or if the FK relation is
1070 * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1072 referenced.classId = RelationRelationId;
1073 referenced.objectId = RelationGetRelid(rel);
1074 referenced.objectSubId = 0;
1075 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1077 if (OidIsValid(constrrelid))
1079 referenced.classId = RelationRelationId;
1080 referenced.objectId = constrrelid;
1081 referenced.objectSubId = 0;
1082 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1084 /* Not possible to have an index dependency in this case */
1085 Assert(!OidIsValid(indexOid));
1088 * If it's a user-specified constraint trigger, make the constraint
1089 * internally dependent on the trigger instead of vice versa.
1091 if (OidIsValid(constraintOid))
1093 referenced.classId = ConstraintRelationId;
1094 referenced.objectId = constraintOid;
1095 referenced.objectSubId = 0;
1096 recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1100 * If it's a partition trigger, create the partition dependencies.
1102 if (OidIsValid(parentTriggerOid))
1104 ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1105 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1106 ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1107 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1111 /* If column-specific trigger, add normal dependencies on columns */
1112 if (columns != NULL)
1114 int i;
1116 referenced.classId = RelationRelationId;
1117 referenced.objectId = RelationGetRelid(rel);
1118 for (i = 0; i < ncolumns; i++)
1120 referenced.objectSubId = columns[i];
1121 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1126 * If it has a WHEN clause, add dependencies on objects mentioned in the
1127 * expression (eg, functions, as well as any columns used).
1129 if (whenRtable != NIL)
1130 recordDependencyOnExpr(&myself, whenClause, whenRtable,
1131 DEPENDENCY_NORMAL);
1133 /* Post creation hook for new trigger */
1134 InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1135 isInternal);
1138 * Lastly, create the trigger on child relations, if needed.
1140 if (partition_recurse)
1142 PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1143 int i;
1144 MemoryContext oldcxt,
1145 perChildCxt;
1147 perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1148 "part trig clone",
1149 ALLOCSET_SMALL_SIZES);
1152 * We don't currently expect to be called with a valid indexOid. If
1153 * that ever changes then we'll need to write code here to find the
1154 * corresponding child index.
1156 Assert(!OidIsValid(indexOid));
1158 oldcxt = MemoryContextSwitchTo(perChildCxt);
1160 /* Iterate to create the trigger on each existing partition */
1161 for (i = 0; i < partdesc->nparts; i++)
1163 CreateTrigStmt *childStmt;
1164 Relation childTbl;
1165 Node *qual;
1167 childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1170 * Initialize our fabricated parse node by copying the original
1171 * one, then resetting fields that we pass separately.
1173 childStmt = copyObject(stmt);
1174 childStmt->funcname = NIL;
1175 childStmt->whenClause = NULL;
1177 /* If there is a WHEN clause, create a modified copy of it */
1178 qual = copyObject(whenClause);
1179 qual = (Node *)
1180 map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1181 childTbl, rel);
1182 qual = (Node *)
1183 map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1184 childTbl, rel);
1186 CreateTriggerFiringOn(childStmt, queryString,
1187 partdesc->oids[i], refRelOid,
1188 InvalidOid, InvalidOid,
1189 funcoid, trigoid, qual,
1190 isInternal, true, trigger_fires_when);
1192 table_close(childTbl, NoLock);
1194 MemoryContextReset(perChildCxt);
1197 MemoryContextSwitchTo(oldcxt);
1198 MemoryContextDelete(perChildCxt);
1201 /* Keep lock on target rel until end of xact */
1202 table_close(rel, NoLock);
1204 return myself;
1208 * TriggerSetParentTrigger
1209 * Set a partition's trigger as child of its parent trigger,
1210 * or remove the linkage if parentTrigId is InvalidOid.
1212 * This updates the constraint's pg_trigger row to show it as inherited, and
1213 * adds PARTITION dependencies to prevent the trigger from being deleted
1214 * on its own. Alternatively, reverse that.
1216 void
1217 TriggerSetParentTrigger(Relation trigRel,
1218 Oid childTrigId,
1219 Oid parentTrigId,
1220 Oid childTableId)
1222 SysScanDesc tgscan;
1223 ScanKeyData skey[1];
1224 Form_pg_trigger trigForm;
1225 HeapTuple tuple,
1226 newtup;
1227 ObjectAddress depender;
1228 ObjectAddress referenced;
1231 * Find the trigger to delete.
1233 ScanKeyInit(&skey[0],
1234 Anum_pg_trigger_oid,
1235 BTEqualStrategyNumber, F_OIDEQ,
1236 ObjectIdGetDatum(childTrigId));
1238 tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1239 NULL, 1, skey);
1241 tuple = systable_getnext(tgscan);
1242 if (!HeapTupleIsValid(tuple))
1243 elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1244 newtup = heap_copytuple(tuple);
1245 trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1246 if (OidIsValid(parentTrigId))
1248 /* don't allow setting parent for a constraint that already has one */
1249 if (OidIsValid(trigForm->tgparentid))
1250 elog(ERROR, "trigger %u already has a parent trigger",
1251 childTrigId);
1253 trigForm->tgparentid = parentTrigId;
1255 CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1257 ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1259 ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1260 recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1262 ObjectAddressSet(referenced, RelationRelationId, childTableId);
1263 recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1265 else
1267 trigForm->tgparentid = InvalidOid;
1269 CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1271 deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1272 TriggerRelationId,
1273 DEPENDENCY_PARTITION_PRI);
1274 deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1275 RelationRelationId,
1276 DEPENDENCY_PARTITION_SEC);
1279 heap_freetuple(newtup);
1280 systable_endscan(tgscan);
1285 * Guts of trigger deletion.
1287 void
1288 RemoveTriggerById(Oid trigOid)
1290 Relation tgrel;
1291 SysScanDesc tgscan;
1292 ScanKeyData skey[1];
1293 HeapTuple tup;
1294 Oid relid;
1295 Relation rel;
1297 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1300 * Find the trigger to delete.
1302 ScanKeyInit(&skey[0],
1303 Anum_pg_trigger_oid,
1304 BTEqualStrategyNumber, F_OIDEQ,
1305 ObjectIdGetDatum(trigOid));
1307 tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1308 NULL, 1, skey);
1310 tup = systable_getnext(tgscan);
1311 if (!HeapTupleIsValid(tup))
1312 elog(ERROR, "could not find tuple for trigger %u", trigOid);
1315 * Open and exclusive-lock the relation the trigger belongs to.
1317 relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1319 rel = table_open(relid, AccessExclusiveLock);
1321 if (rel->rd_rel->relkind != RELKIND_RELATION &&
1322 rel->rd_rel->relkind != RELKIND_VIEW &&
1323 rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1324 rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1325 ereport(ERROR,
1326 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1327 errmsg("relation \"%s\" cannot have triggers",
1328 RelationGetRelationName(rel)),
1329 errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1331 if (!allowSystemTableMods && IsSystemRelation(rel))
1332 ereport(ERROR,
1333 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1334 errmsg("permission denied: \"%s\" is a system catalog",
1335 RelationGetRelationName(rel))));
1338 * Delete the pg_trigger tuple.
1340 CatalogTupleDelete(tgrel, &tup->t_self);
1342 systable_endscan(tgscan);
1343 table_close(tgrel, RowExclusiveLock);
1346 * We do not bother to try to determine whether any other triggers remain,
1347 * which would be needed in order to decide whether it's safe to clear the
1348 * relation's relhastriggers. (In any case, there might be a concurrent
1349 * process adding new triggers.) Instead, just force a relcache inval to
1350 * make other backends (and this one too!) rebuild their relcache entries.
1351 * There's no great harm in leaving relhastriggers true even if there are
1352 * no triggers left.
1354 CacheInvalidateRelcache(rel);
1356 /* Keep lock on trigger's rel until end of xact */
1357 table_close(rel, NoLock);
1361 * get_trigger_oid - Look up a trigger by name to find its OID.
1363 * If missing_ok is false, throw an error if trigger not found. If
1364 * true, just return InvalidOid.
1367 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1369 Relation tgrel;
1370 ScanKeyData skey[2];
1371 SysScanDesc tgscan;
1372 HeapTuple tup;
1373 Oid oid;
1376 * Find the trigger, verify permissions, set up object address
1378 tgrel = table_open(TriggerRelationId, AccessShareLock);
1380 ScanKeyInit(&skey[0],
1381 Anum_pg_trigger_tgrelid,
1382 BTEqualStrategyNumber, F_OIDEQ,
1383 ObjectIdGetDatum(relid));
1384 ScanKeyInit(&skey[1],
1385 Anum_pg_trigger_tgname,
1386 BTEqualStrategyNumber, F_NAMEEQ,
1387 CStringGetDatum(trigname));
1389 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1390 NULL, 2, skey);
1392 tup = systable_getnext(tgscan);
1394 if (!HeapTupleIsValid(tup))
1396 if (!missing_ok)
1397 ereport(ERROR,
1398 (errcode(ERRCODE_UNDEFINED_OBJECT),
1399 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1400 trigname, get_rel_name(relid))));
1401 oid = InvalidOid;
1403 else
1405 oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1408 systable_endscan(tgscan);
1409 table_close(tgrel, AccessShareLock);
1410 return oid;
1414 * Perform permissions and integrity checks before acquiring a relation lock.
1416 static void
1417 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1418 void *arg)
1420 HeapTuple tuple;
1421 Form_pg_class form;
1423 tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1424 if (!HeapTupleIsValid(tuple))
1425 return; /* concurrently dropped */
1426 form = (Form_pg_class) GETSTRUCT(tuple);
1428 /* only tables and views can have triggers */
1429 if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1430 form->relkind != RELKIND_FOREIGN_TABLE &&
1431 form->relkind != RELKIND_PARTITIONED_TABLE)
1432 ereport(ERROR,
1433 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1434 errmsg("relation \"%s\" cannot have triggers",
1435 rv->relname),
1436 errdetail_relkind_not_supported(form->relkind)));
1438 /* you must own the table to rename one of its triggers */
1439 if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1440 aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1441 if (!allowSystemTableMods && IsSystemClass(relid, form))
1442 ereport(ERROR,
1443 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1444 errmsg("permission denied: \"%s\" is a system catalog",
1445 rv->relname)));
1447 ReleaseSysCache(tuple);
1451 * renametrig - changes the name of a trigger on a relation
1453 * trigger name is changed in trigger catalog.
1454 * No record of the previous name is kept.
1456 * get proper relrelation from relation catalog (if not arg)
1457 * scan trigger catalog
1458 * for name conflict (within rel)
1459 * for original trigger (if not arg)
1460 * modify tgname in trigger tuple
1461 * update row in catalog
1463 ObjectAddress
1464 renametrig(RenameStmt *stmt)
1466 Oid tgoid;
1467 Relation targetrel;
1468 Relation tgrel;
1469 HeapTuple tuple;
1470 SysScanDesc tgscan;
1471 ScanKeyData key[2];
1472 Oid relid;
1473 ObjectAddress address;
1476 * Look up name, check permissions, and acquire lock (which we will NOT
1477 * release until end of transaction).
1479 relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1481 RangeVarCallbackForRenameTrigger,
1482 NULL);
1484 /* Have lock already, so just need to build relcache entry. */
1485 targetrel = relation_open(relid, NoLock);
1488 * On partitioned tables, this operation recurses to partitions. Lock all
1489 * tables upfront.
1491 if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1492 (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1494 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1497 * Search for the trigger to modify.
1499 ScanKeyInit(&key[0],
1500 Anum_pg_trigger_tgrelid,
1501 BTEqualStrategyNumber, F_OIDEQ,
1502 ObjectIdGetDatum(relid));
1503 ScanKeyInit(&key[1],
1504 Anum_pg_trigger_tgname,
1505 BTEqualStrategyNumber, F_NAMEEQ,
1506 PointerGetDatum(stmt->subname));
1507 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1508 NULL, 2, key);
1509 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1511 Form_pg_trigger trigform;
1513 trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1514 tgoid = trigform->oid;
1517 * If the trigger descends from a trigger on a parent partitioned
1518 * table, reject the rename. We don't allow a trigger in a partition
1519 * to differ in name from that of its parent: that would lead to an
1520 * inconsistency that pg_dump would not reproduce.
1522 if (OidIsValid(trigform->tgparentid))
1523 ereport(ERROR,
1524 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1525 errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1526 stmt->subname, RelationGetRelationName(targetrel)),
1527 errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1528 get_rel_name(get_partition_parent(relid, false))));
1531 /* Rename the trigger on this relation ... */
1532 renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1533 stmt->subname);
1535 /* ... and if it is partitioned, recurse to its partitions */
1536 if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1538 PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1540 for (int i = 0; i < partdesc->nparts; i++)
1542 Oid partitionId = partdesc->oids[i];
1544 renametrig_partition(tgrel, partitionId, trigform->oid,
1545 stmt->newname, stmt->subname);
1549 else
1551 ereport(ERROR,
1552 (errcode(ERRCODE_UNDEFINED_OBJECT),
1553 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1554 stmt->subname, RelationGetRelationName(targetrel))));
1557 ObjectAddressSet(address, TriggerRelationId, tgoid);
1559 systable_endscan(tgscan);
1561 table_close(tgrel, RowExclusiveLock);
1564 * Close rel, but keep exclusive lock!
1566 relation_close(targetrel, NoLock);
1568 return address;
1572 * Subroutine for renametrig -- perform the actual work of renaming one
1573 * trigger on one table.
1575 * If the trigger has a name different from the expected one, raise a
1576 * NOTICE about it.
1578 static void
1579 renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
1580 const char *newname, const char *expected_name)
1582 HeapTuple tuple;
1583 Form_pg_trigger tgform;
1584 ScanKeyData key[2];
1585 SysScanDesc tgscan;
1587 /* If the trigger already has the new name, nothing to do. */
1588 tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1589 if (strcmp(NameStr(tgform->tgname), newname) == 0)
1590 return;
1593 * Before actually trying the rename, search for triggers with the same
1594 * name. The update would fail with an ugly message in that case, and it
1595 * is better to throw a nicer error.
1597 ScanKeyInit(&key[0],
1598 Anum_pg_trigger_tgrelid,
1599 BTEqualStrategyNumber, F_OIDEQ,
1600 ObjectIdGetDatum(RelationGetRelid(targetrel)));
1601 ScanKeyInit(&key[1],
1602 Anum_pg_trigger_tgname,
1603 BTEqualStrategyNumber, F_NAMEEQ,
1604 PointerGetDatum(newname));
1605 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1606 NULL, 2, key);
1607 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1608 ereport(ERROR,
1609 (errcode(ERRCODE_DUPLICATE_OBJECT),
1610 errmsg("trigger \"%s\" for relation \"%s\" already exists",
1611 newname, RelationGetRelationName(targetrel))));
1612 systable_endscan(tgscan);
1615 * The target name is free; update the existing pg_trigger tuple with it.
1617 tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1618 tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1621 * If the trigger has a name different from what we expected, let the user
1622 * know. (We can proceed anyway, since we must have reached here following
1623 * a tgparentid link.)
1625 if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1626 ereport(NOTICE,
1627 errmsg("renamed trigger \"%s\" on relation \"%s\"",
1628 NameStr(tgform->tgname),
1629 RelationGetRelationName(targetrel)));
1631 namestrcpy(&tgform->tgname, newname);
1633 CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1635 InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1638 * Invalidate relation's relcache entry so that other backends (and this
1639 * one too!) are sent SI message to make them rebuild relcache entries.
1640 * (Ideally this should happen automatically...)
1642 CacheInvalidateRelcache(targetrel);
1646 * Subroutine for renametrig -- Helper for recursing to partitions when
1647 * renaming triggers on a partitioned table.
1649 static void
1650 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1651 const char *newname, const char *expected_name)
1653 SysScanDesc tgscan;
1654 ScanKeyData key;
1655 HeapTuple tuple;
1658 * Given a relation and the OID of a trigger on parent relation, find the
1659 * corresponding trigger in the child and rename that trigger to the given
1660 * name.
1662 ScanKeyInit(&key,
1663 Anum_pg_trigger_tgrelid,
1664 BTEqualStrategyNumber, F_OIDEQ,
1665 ObjectIdGetDatum(partitionId));
1666 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1667 NULL, 1, &key);
1668 while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1670 Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1671 Relation partitionRel;
1673 if (tgform->tgparentid != parentTriggerOid)
1674 continue; /* not our trigger */
1676 partitionRel = table_open(partitionId, NoLock);
1678 /* Rename the trigger on this partition */
1679 renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1681 /* And if this relation is partitioned, recurse to its partitions */
1682 if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1684 PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1685 true);
1687 for (int i = 0; i < partdesc->nparts; i++)
1689 Oid partoid = partdesc->oids[i];
1691 renametrig_partition(tgrel, partoid, tgform->oid, newname,
1692 NameStr(tgform->tgname));
1695 table_close(partitionRel, NoLock);
1697 /* There should be at most one matching tuple */
1698 break;
1700 systable_endscan(tgscan);
1704 * EnableDisableTrigger()
1706 * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1707 * to change 'tgenabled' field for the specified trigger(s)
1709 * rel: relation to process (caller must hold suitable lock on it)
1710 * tgname: name of trigger to process, or NULL to scan all triggers
1711 * tgparent: if not zero, process only triggers with this tgparentid
1712 * fires_when: new value for tgenabled field. In addition to generic
1713 * enablement/disablement, this also defines when the trigger
1714 * should be fired in session replication roles.
1715 * skip_system: if true, skip "system" triggers (constraint triggers)
1716 * recurse: if true, recurse to partitions
1718 * Caller should have checked permissions for the table; here we also
1719 * enforce that superuser privilege is required to alter the state of
1720 * system triggers
1722 void
1723 EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1724 char fires_when, bool skip_system, bool recurse,
1725 LOCKMODE lockmode)
1727 Relation tgrel;
1728 int nkeys;
1729 ScanKeyData keys[2];
1730 SysScanDesc tgscan;
1731 HeapTuple tuple;
1732 bool found;
1733 bool changed;
1735 /* Scan the relevant entries in pg_triggers */
1736 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1738 ScanKeyInit(&keys[0],
1739 Anum_pg_trigger_tgrelid,
1740 BTEqualStrategyNumber, F_OIDEQ,
1741 ObjectIdGetDatum(RelationGetRelid(rel)));
1742 if (tgname)
1744 ScanKeyInit(&keys[1],
1745 Anum_pg_trigger_tgname,
1746 BTEqualStrategyNumber, F_NAMEEQ,
1747 CStringGetDatum(tgname));
1748 nkeys = 2;
1750 else
1751 nkeys = 1;
1753 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1754 NULL, nkeys, keys);
1756 found = changed = false;
1758 while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1760 Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1762 if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1763 continue;
1765 if (oldtrig->tgisinternal)
1767 /* system trigger ... ok to process? */
1768 if (skip_system)
1769 continue;
1770 if (!superuser())
1771 ereport(ERROR,
1772 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1773 errmsg("permission denied: \"%s\" is a system trigger",
1774 NameStr(oldtrig->tgname))));
1777 found = true;
1779 if (oldtrig->tgenabled != fires_when)
1781 /* need to change this one ... make a copy to scribble on */
1782 HeapTuple newtup = heap_copytuple(tuple);
1783 Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1785 newtrig->tgenabled = fires_when;
1787 CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1789 heap_freetuple(newtup);
1791 changed = true;
1795 * When altering FOR EACH ROW triggers on a partitioned table, do the
1796 * same on the partitions as well, unless ONLY is specified.
1798 * Note that we recurse even if we didn't change the trigger above,
1799 * because the partitions' copy of the trigger may have a different
1800 * value of tgenabled than the parent's trigger and thus might need to
1801 * be changed.
1803 if (recurse &&
1804 rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1805 (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1807 PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1808 int i;
1810 for (i = 0; i < partdesc->nparts; i++)
1812 Relation part;
1814 part = relation_open(partdesc->oids[i], lockmode);
1815 /* Match on child triggers' tgparentid, not their name */
1816 EnableDisableTrigger(part, NULL, oldtrig->oid,
1817 fires_when, skip_system, recurse,
1818 lockmode);
1819 table_close(part, NoLock); /* keep lock till commit */
1823 InvokeObjectPostAlterHook(TriggerRelationId,
1824 oldtrig->oid, 0);
1827 systable_endscan(tgscan);
1829 table_close(tgrel, RowExclusiveLock);
1831 if (tgname && !found)
1832 ereport(ERROR,
1833 (errcode(ERRCODE_UNDEFINED_OBJECT),
1834 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1835 tgname, RelationGetRelationName(rel))));
1838 * If we changed anything, broadcast a SI inval message to force each
1839 * backend (including our own!) to rebuild relation's relcache entry.
1840 * Otherwise they will fail to apply the change promptly.
1842 if (changed)
1843 CacheInvalidateRelcache(rel);
1848 * Build trigger data to attach to the given relcache entry.
1850 * Note that trigger data attached to a relcache entry must be stored in
1851 * CacheMemoryContext to ensure it survives as long as the relcache entry.
1852 * But we should be running in a less long-lived working context. To avoid
1853 * leaking cache memory if this routine fails partway through, we build a
1854 * temporary TriggerDesc in working memory and then copy the completed
1855 * structure into cache memory.
1857 void
1858 RelationBuildTriggers(Relation relation)
1860 TriggerDesc *trigdesc;
1861 int numtrigs;
1862 int maxtrigs;
1863 Trigger *triggers;
1864 Relation tgrel;
1865 ScanKeyData skey;
1866 SysScanDesc tgscan;
1867 HeapTuple htup;
1868 MemoryContext oldContext;
1869 int i;
1872 * Allocate a working array to hold the triggers (the array is extended if
1873 * necessary)
1875 maxtrigs = 16;
1876 triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1877 numtrigs = 0;
1880 * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1881 * be reading the triggers in name order, except possibly during
1882 * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1883 * ensures that triggers will be fired in name order.
1885 ScanKeyInit(&skey,
1886 Anum_pg_trigger_tgrelid,
1887 BTEqualStrategyNumber, F_OIDEQ,
1888 ObjectIdGetDatum(RelationGetRelid(relation)));
1890 tgrel = table_open(TriggerRelationId, AccessShareLock);
1891 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1892 NULL, 1, &skey);
1894 while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1896 Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1897 Trigger *build;
1898 Datum datum;
1899 bool isnull;
1901 if (numtrigs >= maxtrigs)
1903 maxtrigs *= 2;
1904 triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1906 build = &(triggers[numtrigs]);
1908 build->tgoid = pg_trigger->oid;
1909 build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1910 NameGetDatum(&pg_trigger->tgname)));
1911 build->tgfoid = pg_trigger->tgfoid;
1912 build->tgtype = pg_trigger->tgtype;
1913 build->tgenabled = pg_trigger->tgenabled;
1914 build->tgisinternal = pg_trigger->tgisinternal;
1915 build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1916 build->tgconstrrelid = pg_trigger->tgconstrrelid;
1917 build->tgconstrindid = pg_trigger->tgconstrindid;
1918 build->tgconstraint = pg_trigger->tgconstraint;
1919 build->tgdeferrable = pg_trigger->tgdeferrable;
1920 build->tginitdeferred = pg_trigger->tginitdeferred;
1921 build->tgnargs = pg_trigger->tgnargs;
1922 /* tgattr is first var-width field, so OK to access directly */
1923 build->tgnattr = pg_trigger->tgattr.dim1;
1924 if (build->tgnattr > 0)
1926 build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1927 memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1928 build->tgnattr * sizeof(int16));
1930 else
1931 build->tgattr = NULL;
1932 if (build->tgnargs > 0)
1934 bytea *val;
1935 char *p;
1937 val = DatumGetByteaPP(fastgetattr(htup,
1938 Anum_pg_trigger_tgargs,
1939 tgrel->rd_att, &isnull));
1940 if (isnull)
1941 elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1942 RelationGetRelationName(relation));
1943 p = (char *) VARDATA_ANY(val);
1944 build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1945 for (i = 0; i < build->tgnargs; i++)
1947 build->tgargs[i] = pstrdup(p);
1948 p += strlen(p) + 1;
1951 else
1952 build->tgargs = NULL;
1954 datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1955 tgrel->rd_att, &isnull);
1956 if (!isnull)
1957 build->tgoldtable =
1958 DatumGetCString(DirectFunctionCall1(nameout, datum));
1959 else
1960 build->tgoldtable = NULL;
1962 datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1963 tgrel->rd_att, &isnull);
1964 if (!isnull)
1965 build->tgnewtable =
1966 DatumGetCString(DirectFunctionCall1(nameout, datum));
1967 else
1968 build->tgnewtable = NULL;
1970 datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1971 tgrel->rd_att, &isnull);
1972 if (!isnull)
1973 build->tgqual = TextDatumGetCString(datum);
1974 else
1975 build->tgqual = NULL;
1977 numtrigs++;
1980 systable_endscan(tgscan);
1981 table_close(tgrel, AccessShareLock);
1983 /* There might not be any triggers */
1984 if (numtrigs == 0)
1986 pfree(triggers);
1987 return;
1990 /* Build trigdesc */
1991 trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1992 trigdesc->triggers = triggers;
1993 trigdesc->numtriggers = numtrigs;
1994 for (i = 0; i < numtrigs; i++)
1995 SetTriggerFlags(trigdesc, &(triggers[i]));
1997 /* Copy completed trigdesc into cache storage */
1998 oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1999 relation->trigdesc = CopyTriggerDesc(trigdesc);
2000 MemoryContextSwitchTo(oldContext);
2002 /* Release working memory */
2003 FreeTriggerDesc(trigdesc);
2007 * Update the TriggerDesc's hint flags to include the specified trigger
2009 static void
2010 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2012 int16 tgtype = trigger->tgtype;
2014 trigdesc->trig_insert_before_row |=
2015 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2016 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2017 trigdesc->trig_insert_after_row |=
2018 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2019 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2020 trigdesc->trig_insert_instead_row |=
2021 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2022 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2023 trigdesc->trig_insert_before_statement |=
2024 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2025 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2026 trigdesc->trig_insert_after_statement |=
2027 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2028 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2029 trigdesc->trig_update_before_row |=
2030 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2031 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2032 trigdesc->trig_update_after_row |=
2033 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2035 trigdesc->trig_update_instead_row |=
2036 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2037 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2038 trigdesc->trig_update_before_statement |=
2039 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2040 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2041 trigdesc->trig_update_after_statement |=
2042 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2043 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2044 trigdesc->trig_delete_before_row |=
2045 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2046 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2047 trigdesc->trig_delete_after_row |=
2048 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2050 trigdesc->trig_delete_instead_row |=
2051 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2052 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2053 trigdesc->trig_delete_before_statement |=
2054 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2055 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2056 trigdesc->trig_delete_after_statement |=
2057 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2058 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2059 /* there are no row-level truncate triggers */
2060 trigdesc->trig_truncate_before_statement |=
2061 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2062 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2063 trigdesc->trig_truncate_after_statement |=
2064 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2065 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2067 trigdesc->trig_insert_new_table |=
2068 (TRIGGER_FOR_INSERT(tgtype) &&
2069 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2070 trigdesc->trig_update_old_table |=
2071 (TRIGGER_FOR_UPDATE(tgtype) &&
2072 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2073 trigdesc->trig_update_new_table |=
2074 (TRIGGER_FOR_UPDATE(tgtype) &&
2075 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2076 trigdesc->trig_delete_old_table |=
2077 (TRIGGER_FOR_DELETE(tgtype) &&
2078 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2082 * Copy a TriggerDesc data structure.
2084 * The copy is allocated in the current memory context.
2086 TriggerDesc *
2087 CopyTriggerDesc(TriggerDesc *trigdesc)
2089 TriggerDesc *newdesc;
2090 Trigger *trigger;
2091 int i;
2093 if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2094 return NULL;
2096 newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2097 memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2099 trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2100 memcpy(trigger, trigdesc->triggers,
2101 trigdesc->numtriggers * sizeof(Trigger));
2102 newdesc->triggers = trigger;
2104 for (i = 0; i < trigdesc->numtriggers; i++)
2106 trigger->tgname = pstrdup(trigger->tgname);
2107 if (trigger->tgnattr > 0)
2109 int16 *newattr;
2111 newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2112 memcpy(newattr, trigger->tgattr,
2113 trigger->tgnattr * sizeof(int16));
2114 trigger->tgattr = newattr;
2116 if (trigger->tgnargs > 0)
2118 char **newargs;
2119 int16 j;
2121 newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2122 for (j = 0; j < trigger->tgnargs; j++)
2123 newargs[j] = pstrdup(trigger->tgargs[j]);
2124 trigger->tgargs = newargs;
2126 if (trigger->tgqual)
2127 trigger->tgqual = pstrdup(trigger->tgqual);
2128 if (trigger->tgoldtable)
2129 trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2130 if (trigger->tgnewtable)
2131 trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2132 trigger++;
2135 return newdesc;
2139 * Free a TriggerDesc data structure.
2141 void
2142 FreeTriggerDesc(TriggerDesc *trigdesc)
2144 Trigger *trigger;
2145 int i;
2147 if (trigdesc == NULL)
2148 return;
2150 trigger = trigdesc->triggers;
2151 for (i = 0; i < trigdesc->numtriggers; i++)
2153 pfree(trigger->tgname);
2154 if (trigger->tgnattr > 0)
2155 pfree(trigger->tgattr);
2156 if (trigger->tgnargs > 0)
2158 while (--(trigger->tgnargs) >= 0)
2159 pfree(trigger->tgargs[trigger->tgnargs]);
2160 pfree(trigger->tgargs);
2162 if (trigger->tgqual)
2163 pfree(trigger->tgqual);
2164 if (trigger->tgoldtable)
2165 pfree(trigger->tgoldtable);
2166 if (trigger->tgnewtable)
2167 pfree(trigger->tgnewtable);
2168 trigger++;
2170 pfree(trigdesc->triggers);
2171 pfree(trigdesc);
/*
 * Compare two TriggerDesc structures for logical equality.
 */
#ifdef NOT_USED
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i,
				j;

	/*
	 * We need not examine the hint flags, just the trigger array itself; if
	 * we have the same triggers with the same types, the flags should match.
	 *
	 * As of 7.3 we assume trigger set ordering is significant in the
	 * comparison; so we just compare corresponding slots of the two sets.
	 *
	 * Note: comparing the stringToNode forms of the WHEN clauses means that
	 * parse column locations will affect the result.  This is okay as long
	 * as this function is only used for detecting exact equality, as for
	 * example in checking for staleness of a cache entry.
	 */
	if (trigdesc1 != NULL)
	{
		if (trigdesc2 == NULL)
			return false;
		if (trigdesc1->numtriggers != trigdesc2->numtriggers)
			return false;
		for (i = 0; i < trigdesc1->numtriggers; i++)
		{
			Trigger    *trig1 = trigdesc1->triggers + i;
			Trigger    *trig2 = trigdesc2->triggers + i;

			/* Scalar fields first */
			if (trig1->tgoid != trig2->tgoid)
				return false;
			if (strcmp(trig1->tgname, trig2->tgname) != 0)
				return false;
			if (trig1->tgfoid != trig2->tgfoid)
				return false;
			if (trig1->tgtype != trig2->tgtype)
				return false;
			if (trig1->tgenabled != trig2->tgenabled)
				return false;
			if (trig1->tgisinternal != trig2->tgisinternal)
				return false;
			if (trig1->tgisclone != trig2->tgisclone)
				return false;
			if (trig1->tgconstrrelid != trig2->tgconstrrelid)
				return false;
			if (trig1->tgconstrindid != trig2->tgconstrindid)
				return false;
			if (trig1->tgconstraint != trig2->tgconstraint)
				return false;
			if (trig1->tgdeferrable != trig2->tgdeferrable)
				return false;
			if (trig1->tginitdeferred != trig2->tginitdeferred)
				return false;
			if (trig1->tgnargs != trig2->tgnargs)
				return false;
			if (trig1->tgnattr != trig2->tgnattr)
				return false;
			/* Column list and argument list, element by element */
			if (trig1->tgnattr > 0 &&
				memcmp(trig1->tgattr, trig2->tgattr,
					   trig1->tgnattr * sizeof(int16)) != 0)
				return false;
			for (j = 0; j < trig1->tgnargs; j++)
				if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
					return false;
			/* Nullable string fields: equal iff both NULL or same text */
			if (trig1->tgqual == NULL && trig2->tgqual == NULL)
				 /* ok */ ;
			else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
				return false;
			else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
				return false;
			if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
				 /* ok */ ;
			else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
				return false;
			else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
				return false;
			if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
				 /* ok */ ;
			else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
				return false;
			else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
				return false;
		}
	}
	else if (trigdesc2 != NULL)
		return false;
	return true;
}
#endif							/* NOT_USED */
2269 * Check if there is a row-level trigger with transition tables that prevents
2270 * a table from becoming an inheritance child or partition. Return the name
2271 * of the first such incompatible trigger, or NULL if there is none.
2273 const char *
2274 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2276 if (trigdesc != NULL)
2278 int i;
2280 for (i = 0; i < trigdesc->numtriggers; ++i)
2282 Trigger *trigger = &trigdesc->triggers[i];
2284 if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2285 return trigger->tgname;
2289 return NULL;
2293 * Call a trigger function.
2295 * trigdata: trigger descriptor.
2296 * tgindx: trigger's index in finfo and instr arrays.
2297 * finfo: array of cached trigger function call information.
2298 * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2299 * per_tuple_context: memory context to execute the function in.
2301 * Returns the tuple (or NULL) as returned by the function.
2303 static HeapTuple
2304 ExecCallTriggerFunc(TriggerData *trigdata,
2305 int tgindx,
2306 FmgrInfo *finfo,
2307 Instrumentation *instr,
2308 MemoryContext per_tuple_context)
2310 LOCAL_FCINFO(fcinfo, 0);
2311 PgStat_FunctionCallUsage fcusage;
2312 Datum result;
2313 MemoryContext oldContext;
2316 * Protect against code paths that may fail to initialize transition table
2317 * info.
2319 Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2320 TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2321 TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2322 TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2323 !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2324 !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2325 (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2327 finfo += tgindx;
2330 * We cache fmgr lookup info, to avoid making the lookup again on each
2331 * call.
2333 if (finfo->fn_oid == InvalidOid)
2334 fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2336 Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2339 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2341 if (instr)
2342 InstrStartNode(instr + tgindx);
2345 * Do the function evaluation in the per-tuple memory context, so that
2346 * leaked memory will be reclaimed once per tuple. Note in particular that
2347 * any new tuple created by the trigger function will live till the end of
2348 * the tuple cycle.
2350 oldContext = MemoryContextSwitchTo(per_tuple_context);
2353 * Call the function, passing no arguments but setting a context.
2355 InitFunctionCallInfoData(*fcinfo, finfo, 0,
2356 InvalidOid, (Node *) trigdata, NULL);
2358 pgstat_init_function_usage(fcinfo, &fcusage);
2360 MyTriggerDepth++;
2361 PG_TRY();
2363 result = FunctionCallInvoke(fcinfo);
2365 PG_FINALLY();
2367 MyTriggerDepth--;
2369 PG_END_TRY();
2371 pgstat_end_function_usage(&fcusage, true);
2373 MemoryContextSwitchTo(oldContext);
2376 * Trigger protocol allows function to return a null pointer, but NOT to
2377 * set the isnull result flag.
2379 if (fcinfo->isnull)
2380 ereport(ERROR,
2381 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2382 errmsg("trigger function %u returned null value",
2383 fcinfo->flinfo->fn_oid)));
2386 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2387 * one "tuple returned" (really the number of firings).
2389 if (instr)
2390 InstrStopNode(instr + tgindx, 1);
2392 return (HeapTuple) DatumGetPointer(result);
2395 void
2396 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2398 TriggerDesc *trigdesc;
2399 int i;
2400 TriggerData LocTriggerData = {0};
2402 trigdesc = relinfo->ri_TrigDesc;
2404 if (trigdesc == NULL)
2405 return;
2406 if (!trigdesc->trig_insert_before_statement)
2407 return;
2409 /* no-op if we already fired BS triggers in this context */
2410 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2411 CMD_INSERT))
2412 return;
2414 LocTriggerData.type = T_TriggerData;
2415 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2416 TRIGGER_EVENT_BEFORE;
2417 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2418 for (i = 0; i < trigdesc->numtriggers; i++)
2420 Trigger *trigger = &trigdesc->triggers[i];
2421 HeapTuple newtuple;
2423 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2424 TRIGGER_TYPE_STATEMENT,
2425 TRIGGER_TYPE_BEFORE,
2426 TRIGGER_TYPE_INSERT))
2427 continue;
2428 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2429 NULL, NULL, NULL))
2430 continue;
2432 LocTriggerData.tg_trigger = trigger;
2433 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2435 relinfo->ri_TrigFunctions,
2436 relinfo->ri_TrigInstrument,
2437 GetPerTupleMemoryContext(estate));
2439 if (newtuple)
2440 ereport(ERROR,
2441 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2442 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2446 void
2447 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2448 TransitionCaptureState *transition_capture)
2450 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2452 if (trigdesc && trigdesc->trig_insert_after_statement)
2453 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2454 TRIGGER_EVENT_INSERT,
2455 false, NULL, NULL, NIL, NULL, transition_capture,
2456 false);
2459 bool
2460 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2461 TupleTableSlot *slot)
2463 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2464 HeapTuple newtuple = NULL;
2465 bool should_free;
2466 TriggerData LocTriggerData = {0};
2467 int i;
2469 LocTriggerData.type = T_TriggerData;
2470 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2471 TRIGGER_EVENT_ROW |
2472 TRIGGER_EVENT_BEFORE;
2473 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2474 for (i = 0; i < trigdesc->numtriggers; i++)
2476 Trigger *trigger = &trigdesc->triggers[i];
2477 HeapTuple oldtuple;
2479 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2480 TRIGGER_TYPE_ROW,
2481 TRIGGER_TYPE_BEFORE,
2482 TRIGGER_TYPE_INSERT))
2483 continue;
2484 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2485 NULL, NULL, slot))
2486 continue;
2488 if (!newtuple)
2489 newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2491 LocTriggerData.tg_trigslot = slot;
2492 LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2493 LocTriggerData.tg_trigger = trigger;
2494 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2496 relinfo->ri_TrigFunctions,
2497 relinfo->ri_TrigInstrument,
2498 GetPerTupleMemoryContext(estate));
2499 if (newtuple == NULL)
2501 if (should_free)
2502 heap_freetuple(oldtuple);
2503 return false; /* "do nothing" */
2505 else if (newtuple != oldtuple)
2507 ExecForceStoreHeapTuple(newtuple, slot, false);
2510 * After a tuple in a partition goes through a trigger, the user
2511 * could have changed the partition key enough that the tuple no
2512 * longer fits the partition. Verify that.
2514 if (trigger->tgisclone &&
2515 !ExecPartitionCheck(relinfo, slot, estate, false))
2516 ereport(ERROR,
2517 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2518 errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2519 errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2520 trigger->tgname,
2521 get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2522 RelationGetRelationName(relinfo->ri_RelationDesc))));
2524 if (should_free)
2525 heap_freetuple(oldtuple);
2527 /* signal tuple should be re-fetched if used */
2528 newtuple = NULL;
2532 return true;
2535 void
2536 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2537 TupleTableSlot *slot, List *recheckIndexes,
2538 TransitionCaptureState *transition_capture)
2540 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2542 if ((trigdesc && trigdesc->trig_insert_after_row) ||
2543 (transition_capture && transition_capture->tcs_insert_new_table))
2544 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2545 TRIGGER_EVENT_INSERT,
2546 true, NULL, slot,
2547 recheckIndexes, NULL,
2548 transition_capture,
2549 false);
2552 bool
2553 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2554 TupleTableSlot *slot)
2556 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2557 HeapTuple newtuple = NULL;
2558 bool should_free;
2559 TriggerData LocTriggerData = {0};
2560 int i;
2562 LocTriggerData.type = T_TriggerData;
2563 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2564 TRIGGER_EVENT_ROW |
2565 TRIGGER_EVENT_INSTEAD;
2566 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2567 for (i = 0; i < trigdesc->numtriggers; i++)
2569 Trigger *trigger = &trigdesc->triggers[i];
2570 HeapTuple oldtuple;
2572 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2573 TRIGGER_TYPE_ROW,
2574 TRIGGER_TYPE_INSTEAD,
2575 TRIGGER_TYPE_INSERT))
2576 continue;
2577 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2578 NULL, NULL, slot))
2579 continue;
2581 if (!newtuple)
2582 newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2584 LocTriggerData.tg_trigslot = slot;
2585 LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2586 LocTriggerData.tg_trigger = trigger;
2587 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2589 relinfo->ri_TrigFunctions,
2590 relinfo->ri_TrigInstrument,
2591 GetPerTupleMemoryContext(estate));
2592 if (newtuple == NULL)
2594 if (should_free)
2595 heap_freetuple(oldtuple);
2596 return false; /* "do nothing" */
2598 else if (newtuple != oldtuple)
2600 ExecForceStoreHeapTuple(newtuple, slot, false);
2602 if (should_free)
2603 heap_freetuple(oldtuple);
2605 /* signal tuple should be re-fetched if used */
2606 newtuple = NULL;
2610 return true;
2613 void
2614 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2616 TriggerDesc *trigdesc;
2617 int i;
2618 TriggerData LocTriggerData = {0};
2620 trigdesc = relinfo->ri_TrigDesc;
2622 if (trigdesc == NULL)
2623 return;
2624 if (!trigdesc->trig_delete_before_statement)
2625 return;
2627 /* no-op if we already fired BS triggers in this context */
2628 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2629 CMD_DELETE))
2630 return;
2632 LocTriggerData.type = T_TriggerData;
2633 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2634 TRIGGER_EVENT_BEFORE;
2635 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2636 for (i = 0; i < trigdesc->numtriggers; i++)
2638 Trigger *trigger = &trigdesc->triggers[i];
2639 HeapTuple newtuple;
2641 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2642 TRIGGER_TYPE_STATEMENT,
2643 TRIGGER_TYPE_BEFORE,
2644 TRIGGER_TYPE_DELETE))
2645 continue;
2646 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2647 NULL, NULL, NULL))
2648 continue;
2650 LocTriggerData.tg_trigger = trigger;
2651 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2653 relinfo->ri_TrigFunctions,
2654 relinfo->ri_TrigInstrument,
2655 GetPerTupleMemoryContext(estate));
2657 if (newtuple)
2658 ereport(ERROR,
2659 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2660 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2664 void
2665 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2666 TransitionCaptureState *transition_capture)
2668 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2670 if (trigdesc && trigdesc->trig_delete_after_statement)
2671 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2672 TRIGGER_EVENT_DELETE,
2673 false, NULL, NULL, NIL, NULL, transition_capture,
2674 false);
2678 * Execute BEFORE ROW DELETE triggers.
2680 * True indicates caller can proceed with the delete. False indicates caller
2681 * need to suppress the delete and additionally if requested, we need to pass
2682 * back the concurrently updated tuple if any.
2684 bool
2685 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2686 ResultRelInfo *relinfo,
2687 ItemPointer tupleid,
2688 HeapTuple fdw_trigtuple,
2689 TupleTableSlot **epqslot,
2690 TM_Result *tmresult,
2691 TM_FailureData *tmfd)
2693 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2694 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2695 bool result = true;
2696 TriggerData LocTriggerData = {0};
2697 HeapTuple trigtuple;
2698 bool should_free = false;
2699 int i;
2701 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2702 if (fdw_trigtuple == NULL)
2704 TupleTableSlot *epqslot_candidate = NULL;
2706 if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2707 LockTupleExclusive, slot, &epqslot_candidate,
2708 tmresult, tmfd))
2709 return false;
2712 * If the tuple was concurrently updated and the caller of this
2713 * function requested for the updated tuple, skip the trigger
2714 * execution.
2716 if (epqslot_candidate != NULL && epqslot != NULL)
2718 *epqslot = epqslot_candidate;
2719 return false;
2722 trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2724 else
2726 trigtuple = fdw_trigtuple;
2727 ExecForceStoreHeapTuple(trigtuple, slot, false);
2730 LocTriggerData.type = T_TriggerData;
2731 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2732 TRIGGER_EVENT_ROW |
2733 TRIGGER_EVENT_BEFORE;
2734 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2735 for (i = 0; i < trigdesc->numtriggers; i++)
2737 HeapTuple newtuple;
2738 Trigger *trigger = &trigdesc->triggers[i];
2740 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2741 TRIGGER_TYPE_ROW,
2742 TRIGGER_TYPE_BEFORE,
2743 TRIGGER_TYPE_DELETE))
2744 continue;
2745 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2746 NULL, slot, NULL))
2747 continue;
2749 LocTriggerData.tg_trigslot = slot;
2750 LocTriggerData.tg_trigtuple = trigtuple;
2751 LocTriggerData.tg_trigger = trigger;
2752 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2754 relinfo->ri_TrigFunctions,
2755 relinfo->ri_TrigInstrument,
2756 GetPerTupleMemoryContext(estate));
2757 if (newtuple == NULL)
2759 result = false; /* tell caller to suppress delete */
2760 break;
2762 if (newtuple != trigtuple)
2763 heap_freetuple(newtuple);
2765 if (should_free)
2766 heap_freetuple(trigtuple);
2768 return result;
2772 * Note: is_crosspart_update must be true if the DELETE is being performed
2773 * as part of a cross-partition update.
2775 void
2776 ExecARDeleteTriggers(EState *estate,
2777 ResultRelInfo *relinfo,
2778 ItemPointer tupleid,
2779 HeapTuple fdw_trigtuple,
2780 TransitionCaptureState *transition_capture,
2781 bool is_crosspart_update)
2783 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2785 if ((trigdesc && trigdesc->trig_delete_after_row) ||
2786 (transition_capture && transition_capture->tcs_delete_old_table))
2788 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2790 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2791 if (fdw_trigtuple == NULL)
2792 GetTupleForTrigger(estate,
2793 NULL,
2794 relinfo,
2795 tupleid,
2796 LockTupleExclusive,
2797 slot,
2798 NULL,
2799 NULL,
2800 NULL);
2801 else
2802 ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2804 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2805 TRIGGER_EVENT_DELETE,
2806 true, slot, NULL, NIL, NULL,
2807 transition_capture,
2808 is_crosspart_update);
2812 bool
2813 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2814 HeapTuple trigtuple)
2816 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2817 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2818 TriggerData LocTriggerData = {0};
2819 int i;
2821 LocTriggerData.type = T_TriggerData;
2822 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2823 TRIGGER_EVENT_ROW |
2824 TRIGGER_EVENT_INSTEAD;
2825 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2827 ExecForceStoreHeapTuple(trigtuple, slot, false);
2829 for (i = 0; i < trigdesc->numtriggers; i++)
2831 HeapTuple rettuple;
2832 Trigger *trigger = &trigdesc->triggers[i];
2834 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2835 TRIGGER_TYPE_ROW,
2836 TRIGGER_TYPE_INSTEAD,
2837 TRIGGER_TYPE_DELETE))
2838 continue;
2839 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2840 NULL, slot, NULL))
2841 continue;
2843 LocTriggerData.tg_trigslot = slot;
2844 LocTriggerData.tg_trigtuple = trigtuple;
2845 LocTriggerData.tg_trigger = trigger;
2846 rettuple = ExecCallTriggerFunc(&LocTriggerData,
2848 relinfo->ri_TrigFunctions,
2849 relinfo->ri_TrigInstrument,
2850 GetPerTupleMemoryContext(estate));
2851 if (rettuple == NULL)
2852 return false; /* Delete was suppressed */
2853 if (rettuple != trigtuple)
2854 heap_freetuple(rettuple);
2856 return true;
2859 void
2860 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2862 TriggerDesc *trigdesc;
2863 int i;
2864 TriggerData LocTriggerData = {0};
2865 Bitmapset *updatedCols;
2867 trigdesc = relinfo->ri_TrigDesc;
2869 if (trigdesc == NULL)
2870 return;
2871 if (!trigdesc->trig_update_before_statement)
2872 return;
2874 /* no-op if we already fired BS triggers in this context */
2875 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2876 CMD_UPDATE))
2877 return;
2879 /* statement-level triggers operate on the parent table */
2880 Assert(relinfo->ri_RootResultRelInfo == NULL);
2882 updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2884 LocTriggerData.type = T_TriggerData;
2885 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2886 TRIGGER_EVENT_BEFORE;
2887 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2888 LocTriggerData.tg_updatedcols = updatedCols;
2889 for (i = 0; i < trigdesc->numtriggers; i++)
2891 Trigger *trigger = &trigdesc->triggers[i];
2892 HeapTuple newtuple;
2894 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2895 TRIGGER_TYPE_STATEMENT,
2896 TRIGGER_TYPE_BEFORE,
2897 TRIGGER_TYPE_UPDATE))
2898 continue;
2899 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2900 updatedCols, NULL, NULL))
2901 continue;
2903 LocTriggerData.tg_trigger = trigger;
2904 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2906 relinfo->ri_TrigFunctions,
2907 relinfo->ri_TrigInstrument,
2908 GetPerTupleMemoryContext(estate));
2910 if (newtuple)
2911 ereport(ERROR,
2912 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2913 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2917 void
2918 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2919 TransitionCaptureState *transition_capture)
2921 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2923 /* statement-level triggers operate on the parent table */
2924 Assert(relinfo->ri_RootResultRelInfo == NULL);
2926 if (trigdesc && trigdesc->trig_update_after_statement)
2927 AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2928 TRIGGER_EVENT_UPDATE,
2929 false, NULL, NULL, NIL,
2930 ExecGetAllUpdatedCols(relinfo, estate),
2931 transition_capture,
2932 false);
2935 bool
2936 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2937 ResultRelInfo *relinfo,
2938 ItemPointer tupleid,
2939 HeapTuple fdw_trigtuple,
2940 TupleTableSlot *newslot,
2941 TM_Result *tmresult,
2942 TM_FailureData *tmfd)
2944 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2945 TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2946 HeapTuple newtuple = NULL;
2947 HeapTuple trigtuple;
2948 bool should_free_trig = false;
2949 bool should_free_new = false;
2950 TriggerData LocTriggerData = {0};
2951 int i;
2952 Bitmapset *updatedCols;
2953 LockTupleMode lockmode;
2955 /* Determine lock mode to use */
2956 lockmode = ExecUpdateLockMode(estate, relinfo);
2958 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2959 if (fdw_trigtuple == NULL)
2961 TupleTableSlot *epqslot_candidate = NULL;
2963 /* get a copy of the on-disk tuple we are planning to update */
2964 if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2965 lockmode, oldslot, &epqslot_candidate,
2966 tmresult, tmfd))
2967 return false; /* cancel the update action */
2970 * In READ COMMITTED isolation level it's possible that target tuple
2971 * was changed due to concurrent update. In that case we have a raw
2972 * subplan output tuple in epqslot_candidate, and need to form a new
2973 * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2974 * received in newslot. Neither we nor our callers have any further
2975 * interest in the passed-in tuple, so it's okay to overwrite newslot
2976 * with the newer data.
2978 if (epqslot_candidate != NULL)
2980 TupleTableSlot *epqslot_clean;
2982 epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2983 oldslot);
2986 * Typically, the caller's newslot was also generated by
2987 * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
2988 * slot and copying is not needed. But do the right thing if it
2989 * isn't.
2991 if (unlikely(newslot != epqslot_clean))
2992 ExecCopySlot(newslot, epqslot_clean);
2995 * At this point newslot contains a virtual tuple that may
2996 * reference some fields of oldslot's tuple in some disk buffer.
2997 * If that tuple is in a different page than the original target
2998 * tuple, then our only pin on that buffer is oldslot's, and we're
2999 * about to release it. Hence we'd better materialize newslot to
3000 * ensure it doesn't contain references into an unpinned buffer.
3001 * (We'd materialize it below anyway, but too late for safety.)
3003 ExecMaterializeSlot(newslot);
3007 * Here we convert oldslot to a materialized slot holding trigtuple.
3008 * Neither slot passed to the triggers will hold any buffer pin.
3010 trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3012 else
3014 /* Put the FDW-supplied tuple into oldslot to unify the cases */
3015 ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3016 trigtuple = fdw_trigtuple;
3019 LocTriggerData.type = T_TriggerData;
3020 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3021 TRIGGER_EVENT_ROW |
3022 TRIGGER_EVENT_BEFORE;
3023 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3024 updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3025 LocTriggerData.tg_updatedcols = updatedCols;
3026 for (i = 0; i < trigdesc->numtriggers; i++)
3028 Trigger *trigger = &trigdesc->triggers[i];
3029 HeapTuple oldtuple;
3031 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3032 TRIGGER_TYPE_ROW,
3033 TRIGGER_TYPE_BEFORE,
3034 TRIGGER_TYPE_UPDATE))
3035 continue;
3036 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3037 updatedCols, oldslot, newslot))
3038 continue;
3040 if (!newtuple)
3041 newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3043 LocTriggerData.tg_trigslot = oldslot;
3044 LocTriggerData.tg_trigtuple = trigtuple;
3045 LocTriggerData.tg_newtuple = oldtuple = newtuple;
3046 LocTriggerData.tg_newslot = newslot;
3047 LocTriggerData.tg_trigger = trigger;
3048 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3050 relinfo->ri_TrigFunctions,
3051 relinfo->ri_TrigInstrument,
3052 GetPerTupleMemoryContext(estate));
3054 if (newtuple == NULL)
3056 if (should_free_trig)
3057 heap_freetuple(trigtuple);
3058 if (should_free_new)
3059 heap_freetuple(oldtuple);
3060 return false; /* "do nothing" */
3062 else if (newtuple != oldtuple)
3064 ExecForceStoreHeapTuple(newtuple, newslot, false);
3067 * If the tuple returned by the trigger / being stored, is the old
3068 * row version, and the heap tuple passed to the trigger was
3069 * allocated locally, materialize the slot. Otherwise we might
3070 * free it while still referenced by the slot.
3072 if (should_free_trig && newtuple == trigtuple)
3073 ExecMaterializeSlot(newslot);
3075 if (should_free_new)
3076 heap_freetuple(oldtuple);
3078 /* signal tuple should be re-fetched if used */
3079 newtuple = NULL;
3082 if (should_free_trig)
3083 heap_freetuple(trigtuple);
3085 return true;
3089 * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3090 * and destination partitions, respectively, of a cross-partition update of
3091 * the root partitioned table mentioned in the query, given by 'relinfo'.
3092 * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3093 * partition, and 'newslot' contains the "new" tuple in the destination
3094 * partition. This interface allows to support the requirements of
3095 * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3096 * that case.
3098 void
3099 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3100 ResultRelInfo *src_partinfo,
3101 ResultRelInfo *dst_partinfo,
3102 ItemPointer tupleid,
3103 HeapTuple fdw_trigtuple,
3104 TupleTableSlot *newslot,
3105 List *recheckIndexes,
3106 TransitionCaptureState *transition_capture,
3107 bool is_crosspart_update)
3109 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3111 if ((trigdesc && trigdesc->trig_update_after_row) ||
3112 (transition_capture &&
3113 (transition_capture->tcs_update_old_table ||
3114 transition_capture->tcs_update_new_table)))
3117 * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3118 * update-partition-key operation, then this function is also called
3119 * separately for DELETE and INSERT to capture transition table rows.
3120 * In such case, either old tuple or new tuple can be NULL.
3122 TupleTableSlot *oldslot;
3123 ResultRelInfo *tupsrc;
3125 Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3126 !is_crosspart_update);
3128 tupsrc = src_partinfo ? src_partinfo : relinfo;
3129 oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3131 if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3132 GetTupleForTrigger(estate,
3133 NULL,
3134 tupsrc,
3135 tupleid,
3136 LockTupleExclusive,
3137 oldslot,
3138 NULL,
3139 NULL,
3140 NULL);
3141 else if (fdw_trigtuple != NULL)
3142 ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3143 else
3144 ExecClearTuple(oldslot);
3146 AfterTriggerSaveEvent(estate, relinfo,
3147 src_partinfo, dst_partinfo,
3148 TRIGGER_EVENT_UPDATE,
3149 true,
3150 oldslot, newslot, recheckIndexes,
3151 ExecGetAllUpdatedCols(relinfo, estate),
3152 transition_capture,
3153 is_crosspart_update);
/*
 * ExecIRUpdateTriggers: fire INSTEAD OF ROW UPDATE triggers.
 *
 * 'trigtuple' is the old row, 'newslot' holds the proposed new row; each
 * trigger may replace the new row.  Returns false if a trigger returned
 * NULL ("do nothing"), true otherwise.  On return with true, the final new
 * row is in newslot.
 */
3157 bool
3158 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3159 HeapTuple trigtuple, TupleTableSlot *newslot)
3161 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3162 TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3163 HeapTuple newtuple = NULL; /* NULL means "re-fetch from newslot" */
3164 bool should_free; /* set by ExecFetchSlotHeapTuple when newtuple is fetched */
3165 TriggerData LocTriggerData = {0};
3166 int i;
3168 LocTriggerData.type = T_TriggerData;
3169 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3170 TRIGGER_EVENT_ROW |
3171 TRIGGER_EVENT_INSTEAD;
3172 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3174 ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3176 for (i = 0; i < trigdesc->numtriggers; i++)
3178 Trigger *trigger = &trigdesc->triggers[i];
3179 HeapTuple oldtuple;
3181 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3182 TRIGGER_TYPE_ROW,
3183 TRIGGER_TYPE_INSTEAD,
3184 TRIGGER_TYPE_UPDATE))
3185 continue;
3186 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3187 NULL, oldslot, newslot))
3188 continue;
/* Materialize the current new-row tuple lazily, only once per (possibly replaced) version */
3190 if (!newtuple)
3191 newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3193 LocTriggerData.tg_trigslot = oldslot;
3194 LocTriggerData.tg_trigtuple = trigtuple;
3195 LocTriggerData.tg_newslot = newslot;
3196 LocTriggerData.tg_newtuple = oldtuple = newtuple;
3198 LocTriggerData.tg_trigger = trigger;
3199 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3201 relinfo->ri_TrigFunctions,
3202 relinfo->ri_TrigInstrument,
3203 GetPerTupleMemoryContext(estate));
3204 if (newtuple == NULL)
3206 return false; /* "do nothing" */
3208 else if (newtuple != oldtuple)
/* Trigger replaced the new row: store it back into newslot and free the old copy */
3210 ExecForceStoreHeapTuple(newtuple, newslot, false);
3212 if (should_free)
3213 heap_freetuple(oldtuple);
3215 /* signal tuple should be re-fetched if used */
3216 newtuple = NULL;
3220 return true;
/*
 * ExecBSTruncateTriggers: fire BEFORE STATEMENT triggers for TRUNCATE.
 *
 * Statement-level triggers get no tuples; a trigger that returns a non-NULL
 * result violates the trigger protocol and raises an error.
 */
3223 void
3224 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3226 TriggerDesc *trigdesc;
3227 int i;
3228 TriggerData LocTriggerData = {0};
3230 trigdesc = relinfo->ri_TrigDesc;
/* Quick exits when the relation has no relevant triggers */
3232 if (trigdesc == NULL)
3233 return;
3234 if (!trigdesc->trig_truncate_before_statement)
3235 return;
3237 LocTriggerData.type = T_TriggerData;
3238 LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3239 TRIGGER_EVENT_BEFORE;
3240 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3242 for (i = 0; i < trigdesc->numtriggers; i++)
3244 Trigger *trigger = &trigdesc->triggers[i];
3245 HeapTuple newtuple;
3247 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3248 TRIGGER_TYPE_STATEMENT,
3249 TRIGGER_TYPE_BEFORE,
3250 TRIGGER_TYPE_TRUNCATE))
3251 continue;
3252 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3253 NULL, NULL, NULL))
3254 continue;
3256 LocTriggerData.tg_trigger = trigger;
3257 newtuple = ExecCallTriggerFunc(&LocTriggerData,
3259 relinfo->ri_TrigFunctions,
3260 relinfo->ri_TrigInstrument,
3261 GetPerTupleMemoryContext(estate));
3263 if (newtuple)
3264 ereport(ERROR,
3265 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3266 errmsg("BEFORE STATEMENT trigger cannot return a value")));
/*
 * ExecASTruncateTriggers: queue AFTER STATEMENT trigger events for TRUNCATE.
 * No tuples or transition data apply, hence the NULL/NIL arguments.
 */
3270 void
3271 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3273 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3275 if (trigdesc && trigdesc->trig_truncate_after_statement)
3276 AfterTriggerSaveEvent(estate, relinfo,
3277 NULL, NULL,
3278 TRIGGER_EVENT_TRUNCATE,
3279 false, NULL, NULL, NIL, NULL, NULL,
3280 false);
/*
 * GetTupleForTrigger
 *
 * Fetch the tuple at 'tid' into 'oldslot'.  When 'epqslot' is non-NULL the
 * tuple is locked (table_tuple_lock) and concurrent-update conflicts are
 * resolved via EvalPlanQual; otherwise the row version is simply fetched
 * with SnapshotAny.  Returns false if the tuple is gone (self-modified,
 * concurrently deleted, or EPQ recheck failed) and should not be processed.
 * Lock/failure details are reported back through tmresultp/tmfdp if given.
 */
3285 * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3287 static bool
3288 GetTupleForTrigger(EState *estate,
3289 EPQState *epqstate,
3290 ResultRelInfo *relinfo,
3291 ItemPointer tid,
3292 LockTupleMode lockmode,
3293 TupleTableSlot *oldslot,
3294 TupleTableSlot **epqslot,
3295 TM_Result *tmresultp,
3296 TM_FailureData *tmfdp)
3298 Relation relation = relinfo->ri_RelationDesc;
3300 if (epqslot != NULL)
3302 TM_Result test;
3303 TM_FailureData tmfd;
3304 int lockflags = 0;
3306 *epqslot = NULL;
3308 /* caller must pass an epqstate if EvalPlanQual is possible */
3309 Assert(epqstate != NULL);
3312 * lock tuple for update
/* In READ COMMITTED we may chase the update chain to the latest version */
3314 if (!IsolationUsesXactSnapshot())
3315 lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3316 test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3317 estate->es_output_cid,
3318 lockmode, LockWaitBlock,
3319 lockflags,
3320 &tmfd);
3322 /* Let the caller know about the status of this operation */
3323 if (tmresultp)
3324 *tmresultp = test;
3325 if (tmfdp)
3326 *tmfdp = tmfd;
3328 switch (test)
3330 case TM_SelfModified:
3333 * The target tuple was already updated or deleted by the
3334 * current command, or by a later command in the current
3335 * transaction. We ignore the tuple in the former case, and
3336 * throw error in the latter case, for the same reasons
3337 * enumerated in ExecUpdate and ExecDelete in
3338 * nodeModifyTable.c.
3340 if (tmfd.cmax != estate->es_output_cid)
3341 ereport(ERROR,
3342 (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3343 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3344 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3346 /* treat it as deleted; do not process */
3347 return false;
3349 case TM_Ok:
/* tmfd.traversed means we locked a newer version than our snapshot saw */
3350 if (tmfd.traversed)
3353 * Recheck the tuple using EPQ. For MERGE, we leave this
3354 * to the caller (it must do additional rechecking, and
3355 * might end up executing a different action entirely).
3357 if (estate->es_plannedstmt->commandType == CMD_MERGE)
3359 if (tmresultp)
3360 *tmresultp = TM_Updated;
3361 return false;
3364 *epqslot = EvalPlanQual(epqstate,
3365 relation,
3366 relinfo->ri_RangeTableIndex,
3367 oldslot);
3370 * If PlanQual failed for updated tuple - we must not
3371 * process this tuple!
3373 if (TupIsNull(*epqslot))
3375 *epqslot = NULL;
3376 return false;
3379 break;
3381 case TM_Updated:
/* Only reachable under serializable-ish isolation; otherwise lock should have chased the chain */
3382 if (IsolationUsesXactSnapshot())
3383 ereport(ERROR,
3384 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3385 errmsg("could not serialize access due to concurrent update")));
3386 elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3387 break;
3389 case TM_Deleted:
3390 if (IsolationUsesXactSnapshot())
3391 ereport(ERROR,
3392 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3393 errmsg("could not serialize access due to concurrent delete")));
3394 /* tuple was deleted */
3395 return false;
3397 case TM_Invisible:
3398 elog(ERROR, "attempted to lock invisible tuple");
3399 break;
3401 default:
3402 elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3403 return false; /* keep compiler quiet */
3406 else
3409 * We expect the tuple to be present, thus very simple error handling
3410 * suffices.
3412 if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3413 oldslot))
3414 elog(ERROR, "failed to fetch tuple for trigger");
3417 return true;
/*
 * TriggerEnabled
 *
 * Decide whether 'trigger' should fire for 'event': checks the
 * session_replication_role enable state, the UPDATE column list (tgattr
 * vs. modifiedCols), and the WHEN clause (tgqual), in that order.
 * Compiled WHEN expressions are cached in relinfo->ri_TrigWhenExprs.
 */
3421 * Is trigger enabled to fire?
3423 static bool
3424 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3425 Trigger *trigger, TriggerEvent event,
3426 Bitmapset *modifiedCols,
3427 TupleTableSlot *oldslot, TupleTableSlot *newslot)
3429 /* Check replication-role-dependent enable state */
3430 if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3432 if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3433 trigger->tgenabled == TRIGGER_DISABLED)
3434 return false;
3436 else /* ORIGIN or LOCAL role */
3438 if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3439 trigger->tgenabled == TRIGGER_DISABLED)
3440 return false;
3444 * Check for column-specific trigger (only possible for UPDATE, and in
3445 * fact we *must* ignore tgattr for other event types)
3447 if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3449 int i;
3450 bool modified;
3452 modified = false;
3453 for (i = 0; i < trigger->tgnattr; i++)
/* tgattr holds attnums; offset matches the convention used for modifiedCols bitmapsets */
3455 if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3456 modifiedCols))
3458 modified = true;
3459 break;
3462 if (!modified)
3463 return false;
3466 /* Check for WHEN clause */
3467 if (trigger->tgqual)
3469 ExprState **predicate;
3470 ExprContext *econtext;
3471 MemoryContext oldContext;
3472 int i;
3474 Assert(estate != NULL);
3477 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3478 * matching element of relinfo->ri_TrigWhenExprs[]
3480 i = trigger - relinfo->ri_TrigDesc->triggers;
3481 predicate = &relinfo->ri_TrigWhenExprs[i];
3484 * If first time through for this WHEN expression, build expression
3485 * nodetrees for it. Keep them in the per-query memory context so
3486 * they'll survive throughout the query.
3488 if (*predicate == NULL)
3490 Node *tgqual;
3492 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3493 tgqual = stringToNode(trigger->tgqual);
3494 /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3495 ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3496 ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3497 /* ExecPrepareQual wants implicit-AND form */
3498 tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3499 *predicate = ExecPrepareQual((List *) tgqual, estate);
3500 MemoryContextSwitchTo(oldContext);
3504 * We will use the EState's per-tuple context for evaluating WHEN
3505 * expressions (creating it if it's not already there).
3507 econtext = GetPerTupleExprContext(estate);
3510 * Finally evaluate the expression, making the old and/or new tuples
3511 * available as INNER_VAR/OUTER_VAR respectively.
3513 econtext->ecxt_innertuple = oldslot;
3514 econtext->ecxt_outertuple = newslot;
3515 if (!ExecQual(*predicate, econtext))
3516 return false;
3519 return true;
3523 /* ----------
3524 * After-trigger stuff
3526 * The AfterTriggersData struct holds data about pending AFTER trigger events
3527 * during the current transaction tree. (BEFORE triggers are fired
3528 * immediately so we don't need any persistent state about them.) The struct
3529 * and most of its subsidiary data are kept in TopTransactionContext; however
3530 * some data that can be discarded sooner appears in the CurTransactionContext
3531 * of the relevant subtransaction. Also, the individual event records are
3532 * kept in a separate sub-context of TopTransactionContext. This is done
3533 * mainly so that it's easy to tell from a memory context dump how much space
3534 * is being eaten by trigger events.
3536 * Because the list of pending events can grow large, we go to some
3537 * considerable effort to minimize per-event memory consumption. The event
3538 * records are grouped into chunks and common data for similar events in the
3539 * same chunk is only stored once.
3541 * XXX We need to be able to save the per-event data in a file if it grows too
3542 * large.
3543 * ----------
3546 /* Per-trigger SET CONSTRAINT status */
3547 typedef struct SetConstraintTriggerData
3549 Oid sct_tgoid; /* OID of the constraint trigger */
3550 bool sct_tgisdeferred; /* is it currently SET DEFERRED? */
3551 } SetConstraintTriggerData;
3553 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3556 * SET CONSTRAINT intra-transaction status.
3558 * We make this a single palloc'd object so it can be copied and freed easily.
3560 * all_isset and all_isdeferred are used to keep track
3561 * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3563 * trigstates[] stores per-trigger tgisdeferred settings.
3565 typedef struct SetConstraintStateData
3567 bool all_isset;
3568 bool all_isdeferred;
3569 int numstates; /* number of trigstates[] entries in use */
3570 int numalloc; /* allocated size of trigstates[] */
3571 SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3572 } SetConstraintStateData;
3574 typedef SetConstraintStateData *SetConstraintState;
3578 * Per-trigger-event data
3580 * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3581 * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3582 * Each event record also has an associated AfterTriggerSharedData that is
3583 * shared across all instances of similar events within a "chunk".
3585 * For row-level triggers, we arrange not to waste storage on unneeded ctid
3586 * fields. Updates of regular tables use two; inserts and deletes of regular
3587 * tables use one; foreign tables always use zero and save the tuple(s) to a
3588 * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3589 * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3590 * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3591 * tuple(s). This permits storing tuples once regardless of the number of
3592 * row-level triggers on a foreign table.
3594 * When updates on partitioned tables cause rows to move between partitions,
3595 * the OIDs of both partitions are stored too, so that the tuples can be
3596 * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3597 * partition update").
3599 * Note that we need triggers on foreign tables to be fired in exactly the
3600 * order they were queued, so that the tuples come out of the tuplestore in
3601 * the right order. To ensure that, we forbid deferrable (constraint)
3602 * triggers on foreign tables. This also ensures that such triggers do not
3603 * get deferred into outer trigger query levels, meaning that it's okay to
3604 * destroy the tuplestore at the end of the query level.
3606 * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3607 * require no ctid field. We lack the flag bit space to neatly represent that
3608 * distinct case, and it seems unlikely to be worth much trouble.
3610 * Note: ats_firing_id is initially zero and is set to something else when
3611 * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3612 * cycle the trigger will be fired in (or was fired in, if DONE is set).
3613 * Although this is mutable state, we can keep it in AfterTriggerSharedData
3614 * because all instances of the same type of event in a given event list will
3615 * be fired at the same time, if they were queued between the same firing
3616 * cycles. So we need only ensure that ats_firing_id is zero when attaching
3617 * a new event to an existing AfterTriggerSharedData record.
3619 typedef uint32 TriggerFlags;
/* Low-order bits of ate_flags are the byte offset to the event's shared record */
3621 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3622 #define AFTER_TRIGGER_DONE 0x80000000
3623 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3624 /* bits describing the size and tuple sources of this event */
3625 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3626 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3627 #define AFTER_TRIGGER_1CTID 0x10000000
3628 #define AFTER_TRIGGER_2CTID 0x30000000
3629 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3630 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3631 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3633 typedef struct AfterTriggerSharedData
3635 TriggerEvent ats_event; /* event type indicator, see trigger.h */
3636 Oid ats_tgoid; /* the trigger's ID */
3637 Oid ats_relid; /* the relation it's on */
3638 CommandId ats_firing_id; /* ID for firing cycle */
3639 struct AfterTriggersTableData *ats_table; /* transition table access */
3640 Bitmapset *ats_modifiedcols; /* modified columns */
3641 } AfterTriggerSharedData;
3643 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3645 typedef struct AfterTriggerEventData
3647 TriggerFlags ate_flags; /* status bits and offset to shared data */
3648 ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3649 ItemPointerData ate_ctid2; /* new updated tuple */
3652 * During a cross-partition update of a partitioned table, we also store
3653 * the OIDs of source and destination partitions that are needed to fetch
3654 * the old (ctid1) and the new tuple (ctid2) from, respectively.
3656 Oid ate_src_part;
3657 Oid ate_dst_part;
3658 } AfterTriggerEventData;
3660 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3661 typedef struct AfterTriggerEventDataNoOids
3663 TriggerFlags ate_flags;
3664 ItemPointerData ate_ctid1;
3665 ItemPointerData ate_ctid2;
3666 } AfterTriggerEventDataNoOids;
3668 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3669 typedef struct AfterTriggerEventDataOneCtid
3671 TriggerFlags ate_flags; /* status bits and offset to shared data */
3672 ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3673 } AfterTriggerEventDataOneCtid;
3675 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3676 typedef struct AfterTriggerEventDataZeroCtids
3678 TriggerFlags ate_flags; /* status bits and offset to shared data */
3679 } AfterTriggerEventDataZeroCtids;
/* Size of an event record is selected by its AFTER_TRIGGER_TUP_BITS flags */
3681 #define SizeofTriggerEvent(evt) \
3682 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3683 sizeof(AfterTriggerEventData) : \
3684 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3685 sizeof(AfterTriggerEventDataNoOids) : \
3686 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3687 sizeof(AfterTriggerEventDataOneCtid) : \
3688 sizeof(AfterTriggerEventDataZeroCtids))))
3690 #define GetTriggerSharedData(evt) \
3691 ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3694 * To avoid palloc overhead, we keep trigger events in arrays in successively-
3695 * larger chunks (a slightly more sophisticated version of an expansible
3696 * array). The space between CHUNK_DATA_START and freeptr is occupied by
3697 * AfterTriggerEventData records; the space between endfree and endptr is
3698 * occupied by AfterTriggerSharedData records.
3700 typedef struct AfterTriggerEventChunk
3702 struct AfterTriggerEventChunk *next; /* list link */
3703 char *freeptr; /* start of free space in chunk */
3704 char *endfree; /* end of free space in chunk */
3705 char *endptr; /* end of chunk */
3706 /* event data follows here */
3707 } AfterTriggerEventChunk;
/* First event record starts just past the (MAXALIGN'd) chunk header */
3709 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3711 /* A list of events */
3712 typedef struct AfterTriggerEventList
3714 AfterTriggerEventChunk *head;
3715 AfterTriggerEventChunk *tail;
3716 char *tailfree; /* freeptr of tail chunk */
3717 } AfterTriggerEventList;
3719 /* Macros to help in iterating over a list of events */
3720 #define for_each_chunk(cptr, evtlist) \
3721 for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3722 #define for_each_event(eptr, cptr) \
3723 for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3724 (char *) eptr < (cptr)->freeptr; \
3725 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3726 /* Use this if no special per-chunk processing is needed */
3727 #define for_each_event_chunk(eptr, cptr, evtlist) \
3728 for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3730 /* Macros for iterating from a start point that might not be list start */
3731 #define for_each_chunk_from(cptr) \
3732 for (; cptr != NULL; cptr = cptr->next)
3733 #define for_each_event_from(eptr, cptr) \
3734 for (; \
3735 (char *) eptr < (cptr)->freeptr; \
3736 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3740 * All per-transaction data for the AFTER TRIGGERS module.
3742 * AfterTriggersData has the following fields:
3744 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3745 * We mark firable events with the current firing cycle's ID so that we can
3746 * tell which ones to work on. This ensures sane behavior if a trigger
3747 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3748 * only fire those events that weren't already scheduled for firing.
3750 * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3751 * This is saved and restored across failed subtransactions.
3753 * events is the current list of deferred events. This is global across
3754 * all subtransactions of the current transaction. In a subtransaction
3755 * abort, we know that the events added by the subtransaction are at the
3756 * end of the list, so it is relatively easy to discard them. The event
3757 * list chunks themselves are stored in event_cxt.
3759 * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3760 * (-1 when the stack is empty).
3762 * query_stack[query_depth] is the per-query-level data, including these fields:
3764 * events is a list of AFTER trigger events queued by the current query.
3765 * None of these are valid until the matching AfterTriggerEndQuery call
3766 * occurs. At that point we fire immediate-mode triggers, and append any
3767 * deferred events to the main events list.
3769 * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3770 * needed by events queued by the current query. (Note: we use just one
3771 * tuplestore even though more than one foreign table might be involved.
3772 * This is okay because tuplestores don't really care what's in the tuples
3773 * they store; but it's possible that someday it'd break.)
3775 * tables is a List of AfterTriggersTableData structs for target tables
3776 * of the current query (see below).
3778 * maxquerydepth is just the allocated length of query_stack.
3780 * trans_stack holds per-subtransaction data, including these fields:
3782 * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3783 * state data. Each subtransaction level that modifies that state first
3784 * saves a copy, which we use to restore the state if we abort.
3786 * events is a copy of the events head/tail pointers,
3787 * which we use to restore those values during subtransaction abort.
3789 * query_depth is the subtransaction-start-time value of query_depth,
3790 * which we similarly use to clean up at subtransaction abort.
3792 * firing_counter is the subtransaction-start-time value of firing_counter.
3793 * We use this to recognize which deferred triggers were fired (or marked
3794 * for firing) within an aborted subtransaction.
3796 * We use GetCurrentTransactionNestLevel() to determine the correct array
3797 * index in trans_stack. maxtransdepth is the number of allocated entries in
3798 * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3799 * in cases where errors during subxact abort cause multiple invocations
3800 * of AfterTriggerEndSubXact() at the same nesting depth.)
3802 * We create an AfterTriggersTableData struct for each target table of the
3803 * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3804 * either transition tables or statement-level triggers. This is used to
3805 * hold the relevant transition tables, as well as info tracking whether
3806 * we already queued the statement triggers. (We use that info to prevent
3807 * firing the same statement triggers more than once per statement, or really
3808 * once per transition table set.) These structs, along with the transition
3809 * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3810 * That's sufficient lifespan because we don't allow transition tables to be
3811 * used by deferrable triggers, so they only need to survive until
3812 * AfterTriggerEndQuery.
3814 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3815 typedef struct AfterTriggersTransData AfterTriggersTransData;
3816 typedef struct AfterTriggersTableData AfterTriggersTableData;
3818 typedef struct AfterTriggersData
3820 CommandId firing_counter; /* next firing ID to assign */
3821 SetConstraintState state; /* the active S C state */
3822 AfterTriggerEventList events; /* deferred-event list */
3823 MemoryContext event_cxt; /* memory context for events, if any */
3825 /* per-query-level data: */
3826 AfterTriggersQueryData *query_stack; /* array of structs shown below */
3827 int query_depth; /* current index in above array */
3828 int maxquerydepth; /* allocated len of above array */
3830 /* per-subtransaction-level data: */
3831 AfterTriggersTransData *trans_stack; /* array of structs shown below */
3832 int maxtransdepth; /* allocated len of above array */
3833 } AfterTriggersData;
/* Per-query-level after-trigger state; one entry per AfterTriggerBeginQuery level */
3835 struct AfterTriggersQueryData
3837 AfterTriggerEventList events; /* events pending from this query */
3838 Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3839 List *tables; /* list of AfterTriggersTableData, see below */
/* Per-subtransaction snapshot of module state, used to roll back on subxact abort */
3842 struct AfterTriggersTransData
3844 /* these fields are just for resetting at subtrans abort: */
3845 SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3846 AfterTriggerEventList events; /* saved list pointer */
3847 int query_depth; /* saved query_depth */
3848 CommandId firing_counter; /* saved firing_counter */
/* Per-(table, command) transition-table and statement-trigger bookkeeping */
3851 struct AfterTriggersTableData
3853 /* relid + cmdType form the lookup key for these structs: */
3854 Oid relid; /* target table's OID */
3855 CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3856 bool closed; /* true when no longer OK to add tuples */
3857 bool before_trig_done; /* did we already queue BS triggers? */
3858 bool after_trig_done; /* did we already queue AS triggers? */
3859 AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3862 * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3863 * MERGE can run all three actions in a single statement. Note that UPDATE
3864 * needs both old and new transition tables whereas INSERT needs only new,
3865 * and DELETE needs only old.
3868 /* "old" transition table for UPDATE, if any */
3869 Tuplestorestate *old_upd_tuplestore;
3870 /* "new" transition table for UPDATE, if any */
3871 Tuplestorestate *new_upd_tuplestore;
3872 /* "old" transition table for DELETE, if any */
3873 Tuplestorestate *old_del_tuplestore;
3874 /* "new" transition table for INSERT, if any */
3875 Tuplestorestate *new_ins_tuplestore;
3877 TupleTableSlot *storeslot; /* for converting to tuplestore's format */
/* Module-wide state for the current top-level transaction */
3880 static AfterTriggersData afterTriggers;
/* Forward declarations for the static helpers of the AFTER TRIGGERS module */
3882 static void AfterTriggerExecute(EState *estate,
3883 AfterTriggerEvent event,
3884 ResultRelInfo *relInfo,
3885 ResultRelInfo *src_relInfo,
3886 ResultRelInfo *dst_relInfo,
3887 TriggerDesc *trigdesc,
3888 FmgrInfo *finfo,
3889 Instrumentation *instr,
3890 MemoryContext per_tuple_context,
3891 TupleTableSlot *trig_tuple_slot1,
3892 TupleTableSlot *trig_tuple_slot2);
3893 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3894 CmdType cmdType);
3895 static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
3896 TupleDesc tupdesc);
3897 static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
3898 TupleTableSlot *oldslot,
3899 TupleTableSlot *newslot,
3900 TransitionCaptureState *transition_capture);
3901 static void TransitionTableAddTuple(EState *estate,
3902 TransitionCaptureState *transition_capture,
3903 ResultRelInfo *relinfo,
3904 TupleTableSlot *slot,
3905 TupleTableSlot *original_insert_tuple,
3906 Tuplestorestate *tuplestore);
3907 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3908 static SetConstraintState SetConstraintStateCreate(int numalloc);
3909 static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
3910 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3911 Oid tgoid, bool tgisdeferred);
3912 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
/*
 * GetCurrentFDWTuplestore
 *
 * Lazily create (in CurTransactionContext, owned by the current
 * subtransaction's resource owner) and return the single tuplestore used
 * to hold foreign-table tuples for the current trigger query level.
 */
3916 * Get the FDW tuplestore for the current trigger query level, creating it
3917 * if necessary.
3919 static Tuplestorestate *
3920 GetCurrentFDWTuplestore(void)
3922 Tuplestorestate *ret;
3924 ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3925 if (ret == NULL)
3927 MemoryContext oldcxt;
3928 ResourceOwner saveResourceOwner;
3931 * Make the tuplestore valid until end of subtransaction. We really
3932 * only need it until AfterTriggerEndQuery().
3934 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3935 saveResourceOwner = CurrentResourceOwner;
3936 CurrentResourceOwner = CurTransactionResourceOwner;
3938 ret = tuplestore_begin_heap(false, false, work_mem);
3940 CurrentResourceOwner = saveResourceOwner;
3941 MemoryContextSwitchTo(oldcxt);
3943 afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3946 return ret;
3949 /* ----------
3950 * afterTriggerCheckState()
3952 * Returns true if the trigger event is actually in state DEFERRED.
3953 * ----------
3955 static bool
3956 afterTriggerCheckState(AfterTriggerShared evtshared)
3958 Oid tgoid = evtshared->ats_tgoid;
3959 SetConstraintState state = afterTriggers.state;
3960 int i;
3963 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3964 * constraints declared NOT DEFERRABLE), the state is always false.
3966 if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3967 return false;
3970 * If constraint state exists, SET CONSTRAINTS might have been executed
3971 * either for this trigger or for all triggers.
3973 if (state != NULL)
3975 /* Check for SET CONSTRAINTS for this specific trigger. */
/* A per-trigger entry takes precedence over SET CONSTRAINTS ALL */
3976 for (i = 0; i < state->numstates; i++)
3978 if (state->trigstates[i].sct_tgoid == tgoid)
3979 return state->trigstates[i].sct_tgisdeferred;
3982 /* Check for SET CONSTRAINTS ALL. */
3983 if (state->all_isset)
3984 return state->all_isdeferred;
3988 * Otherwise return the default state for the trigger.
3990 return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3993 /* ----------
3994 * afterTriggerCopyBitmap()
3996 * Copy bitmap into AfterTriggerEvents memory context, which is where the after
3997 * trigger events are kept.
3998 * ----------
4000 static Bitmapset *
4001 afterTriggerCopyBitmap(Bitmapset *src)
4003 Bitmapset *dst;
4004 MemoryContext oldcxt;
/* NULL bitmapsets stay NULL; nothing to copy */
4006 if (src == NULL)
4007 return NULL;
4009 /* Create event context if we didn't already */
4010 if (afterTriggers.event_cxt == NULL)
4011 afterTriggers.event_cxt =
4012 AllocSetContextCreate(TopTransactionContext,
4013 "AfterTriggerEvents",
4014 ALLOCSET_DEFAULT_SIZES)
4016 oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt);
4018 dst = bms_copy(src);
4020 MemoryContextSwitchTo(oldcxt);
4022 return dst;
4025 /* ----------
4026 * afterTriggerAddEvent()
4028 * Add a new trigger event to the specified queue.
4029 * The passed-in event data is copied.
4030 * ----------
4032 static void
4033 afterTriggerAddEvent(AfterTriggerEventList *events,
4034 AfterTriggerEvent event, AfterTriggerShared evtshared)
4036 Size eventsize = SizeofTriggerEvent(event);
4037 Size needed = eventsize + sizeof(AfterTriggerSharedData);
4038 AfterTriggerEventChunk *chunk;
4039 AfterTriggerShared newshared;
4040 AfterTriggerEvent newevent;
4043 * If empty list or not enough room in the tail chunk, make a new chunk.
4044 * We assume here that a new shared record will always be needed.
4046 chunk = events->tail;
4047 if (chunk == NULL ||
4048 chunk->endfree - chunk->freeptr < needed)
4050 Size chunksize;
4052 /* Create event context if we didn't already */
4053 if (afterTriggers.event_cxt == NULL)
4054 afterTriggers.event_cxt =
4055 AllocSetContextCreate(TopTransactionContext,
4056 "AfterTriggerEvents",
4057 ALLOCSET_DEFAULT_SIZES);
4060 * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4061 * These numbers are fairly arbitrary, though there is a hard limit at
4062 * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4063 * shared records using the available space in ate_flags. Another
4064 * constraint is that if the chunk size gets too huge, the search loop
4065 * below would get slow given a (not too common) usage pattern with
4066 * many distinct event types in a chunk. Therefore, we double the
4067 * preceding chunk size only if there weren't too many shared records
4068 * in the preceding chunk; otherwise we halve it. This gives us some
4069 * ability to adapt to the actual usage pattern of the current query
4070 * while still having large chunk sizes in typical usage. All chunk
4071 * sizes used should be MAXALIGN multiples, to ensure that the shared
4072 * records will be aligned safely.
4074 #define MIN_CHUNK_SIZE 1024
4075 #define MAX_CHUNK_SIZE (1024*1024)
4077 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4078 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4079 #endif
4081 if (chunk == NULL)
4082 chunksize = MIN_CHUNK_SIZE;
4083 else
4085 /* preceding chunk size... */
4086 chunksize = chunk->endptr - (char *) chunk;
4087 /* check number of shared records in preceding chunk */
4088 if ((chunk->endptr - chunk->endfree) <=
4089 (100 * sizeof(AfterTriggerSharedData)))
4090 chunksize *= 2; /* okay, double it */
4091 else
4092 chunksize /= 2; /* too many shared records */
4093 chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4095 chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4096 chunk->next = NULL;
4097 chunk->freeptr = CHUNK_DATA_START(chunk);
4098 chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4099 Assert(chunk->endfree - chunk->freeptr >= needed);
4101 if (events->tail == NULL)
4103 Assert(events->head == NULL);
4104 events->head = chunk;
4106 else
4107 events->tail->next = chunk;
4108 events->tail = chunk;
4109 /* events->tailfree is now out of sync, but we'll fix it below */
4113 * Try to locate a matching shared-data record already in the chunk. If
4114 * none, make a new one.
4116 for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4117 (char *) newshared >= chunk->endfree;
4118 newshared--)
4120 if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4121 newshared->ats_relid == evtshared->ats_relid &&
4122 newshared->ats_event == evtshared->ats_event &&
4123 newshared->ats_table == evtshared->ats_table &&
4124 newshared->ats_firing_id == 0)
4125 break;
4127 if ((char *) newshared < chunk->endfree)
4129 *newshared = *evtshared;
4130 newshared->ats_firing_id = 0; /* just to be sure */
4131 chunk->endfree = (char *) newshared;
4134 /* Insert the data */
4135 newevent = (AfterTriggerEvent) chunk->freeptr;
4136 memcpy(newevent, event, eventsize);
4137 /* ... and link the new event to its shared record */
4138 newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4139 newevent->ate_flags |= (char *) newshared - (char *) newevent;
4141 chunk->freeptr += eventsize;
4142 events->tailfree = chunk->freeptr;
4145 /* ----------
4146 * afterTriggerFreeEventList()
4148 * Free all the event storage in the given list.
4149 * ----------
4151 static void
4152 afterTriggerFreeEventList(AfterTriggerEventList *events)
4154 AfterTriggerEventChunk *chunk;
4156 while ((chunk = events->head) != NULL)
4158 events->head = chunk->next;
4159 pfree(chunk);
4161 events->tail = NULL;
4162 events->tailfree = NULL;
4165 /* ----------
4166 * afterTriggerRestoreEventList()
4168 * Restore an event list to its prior length, removing all the events
4169 * added since it had the value old_events.
4170 * ----------
4172 static void
4173 afterTriggerRestoreEventList(AfterTriggerEventList *events,
4174 const AfterTriggerEventList *old_events)
4176 AfterTriggerEventChunk *chunk;
4177 AfterTriggerEventChunk *next_chunk;
4179 if (old_events->tail == NULL)
4181 /* restoring to a completely empty state, so free everything */
4182 afterTriggerFreeEventList(events);
4184 else
4186 *events = *old_events;
4187 /* free any chunks after the last one we want to keep */
4188 for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4190 next_chunk = chunk->next;
4191 pfree(chunk);
4193 /* and clean up the tail chunk to be the right length */
4194 events->tail->next = NULL;
4195 events->tail->freeptr = events->tailfree;
4198 * We don't make any effort to remove now-unused shared data records.
4199 * They might still be useful, anyway.
4204 /* ----------
4205 * afterTriggerDeleteHeadEventChunk()
4207 * Remove the first chunk of events from the query level's event list.
4208 * Keep any event list pointers elsewhere in the query level's data
4209 * structures in sync.
4210 * ----------
4212 static void
4213 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4215 AfterTriggerEventChunk *target = qs->events.head;
4216 ListCell *lc;
4218 Assert(target && target->next);
4221 * First, update any pointers in the per-table data, so that they won't be
4222 * dangling. Resetting obsoleted pointers to NULL will make
4223 * cancel_prior_stmt_triggers start from the list head, which is fine.
4225 foreach(lc, qs->tables)
4227 AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4229 if (table->after_trig_done &&
4230 table->after_trig_events.tail == target)
4232 table->after_trig_events.head = NULL;
4233 table->after_trig_events.tail = NULL;
4234 table->after_trig_events.tailfree = NULL;
4238 /* Now we can flush the head chunk */
4239 qs->events.head = target->next;
4240 pfree(target);
4244 /* ----------
4245 * AfterTriggerExecute()
4247 * Fetch the required tuples back from the heap and fire one
4248 * single trigger function.
4250 * Frequently, this will be fired many times in a row for triggers of
4251 * a single relation. Therefore, we cache the open relation and provide
4252 * fmgr lookup cache space at the caller level. (For triggers fired at
4253 * the end of a query, we can even piggyback on the executor's state.)
4255 * When fired for a cross-partition update of a partitioned table, the old
4256 * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4257 * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4258 * both are converted into the root partitioned table's format before passing
4259 * to the trigger function.
4261 * event: event currently being fired.
4262 * relInfo: result relation for event.
4263 * src_relInfo: source partition of a cross-partition update
4264 * dst_relInfo: its destination partition
4265 * trigdesc: working copy of rel's trigger info.
4266 * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4267 * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4268 * or NULL if no instrumentation is wanted.
4269 * per_tuple_context: memory context to call trigger function in.
4270 * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4271 * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4272 * ----------
4274 static void
4275 AfterTriggerExecute(EState *estate,
4276 AfterTriggerEvent event,
4277 ResultRelInfo *relInfo,
4278 ResultRelInfo *src_relInfo,
4279 ResultRelInfo *dst_relInfo,
4280 TriggerDesc *trigdesc,
4281 FmgrInfo *finfo, Instrumentation *instr,
4282 MemoryContext per_tuple_context,
4283 TupleTableSlot *trig_tuple_slot1,
4284 TupleTableSlot *trig_tuple_slot2)
4286 Relation rel = relInfo->ri_RelationDesc;
4287 Relation src_rel = src_relInfo->ri_RelationDesc;
4288 Relation dst_rel = dst_relInfo->ri_RelationDesc;
4289 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4290 Oid tgoid = evtshared->ats_tgoid;
4291 TriggerData LocTriggerData = {0};
4292 HeapTuple rettuple;
4293 int tgindx;
4294 bool should_free_trig = false;
4295 bool should_free_new = false;
4298 * Locate trigger in trigdesc. It might not be present, and in fact the
4299 * trigdesc could be NULL, if the trigger was dropped since the event was
4300 * queued. In that case, silently do nothing.
4302 if (trigdesc == NULL)
4303 return;
4304 for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4306 if (trigdesc->triggers[tgindx].tgoid == tgoid)
4308 LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4309 break;
4312 if (LocTriggerData.tg_trigger == NULL)
4313 return;
4316 * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4317 * to include time spent re-fetching tuples in the trigger cost.
4319 if (instr)
4320 InstrStartNode(instr + tgindx);
4323 * Fetch the required tuple(s).
4325 switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4327 case AFTER_TRIGGER_FDW_FETCH:
4329 Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4331 if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4332 trig_tuple_slot1))
4333 elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4335 if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4336 TRIGGER_EVENT_UPDATE &&
4337 !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4338 trig_tuple_slot2))
4339 elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4341 /* fall through */
4342 case AFTER_TRIGGER_FDW_REUSE:
4345 * Store tuple in the slot so that tg_trigtuple does not reference
4346 * tuplestore memory. (It is formally possible for the trigger
4347 * function to queue trigger events that add to the same
4348 * tuplestore, which can push other tuples out of memory.) The
4349 * distinction is academic, because we start with a minimal tuple
4350 * that is stored as a heap tuple, constructed in different memory
4351 * context, in the slot anyway.
4353 LocTriggerData.tg_trigslot = trig_tuple_slot1;
4354 LocTriggerData.tg_trigtuple =
4355 ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4357 if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4358 TRIGGER_EVENT_UPDATE)
4360 LocTriggerData.tg_newslot = trig_tuple_slot2;
4361 LocTriggerData.tg_newtuple =
4362 ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4364 else
4366 LocTriggerData.tg_newtuple = NULL;
4368 break;
4370 default:
4371 if (ItemPointerIsValid(&(event->ate_ctid1)))
4373 TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4374 src_relInfo);
4376 if (!table_tuple_fetch_row_version(src_rel,
4377 &(event->ate_ctid1),
4378 SnapshotAny,
4379 src_slot))
4380 elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4383 * Store the tuple fetched from the source partition into the
4384 * target (root partitioned) table slot, converting if needed.
4386 if (src_relInfo != relInfo)
4388 TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4390 LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4391 if (map)
4393 execute_attr_map_slot(map->attrMap,
4394 src_slot,
4395 LocTriggerData.tg_trigslot);
4397 else
4398 ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4400 else
4401 LocTriggerData.tg_trigslot = src_slot;
4402 LocTriggerData.tg_trigtuple =
4403 ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4405 else
4407 LocTriggerData.tg_trigtuple = NULL;
4410 /* don't touch ctid2 if not there */
4411 if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ||
4412 (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4413 ItemPointerIsValid(&(event->ate_ctid2)))
4415 TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4416 dst_relInfo);
4418 if (!table_tuple_fetch_row_version(dst_rel,
4419 &(event->ate_ctid2),
4420 SnapshotAny,
4421 dst_slot))
4422 elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4425 * Store the tuple fetched from the destination partition into
4426 * the target (root partitioned) table slot, converting if
4427 * needed.
4429 if (dst_relInfo != relInfo)
4431 TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4433 LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4434 if (map)
4436 execute_attr_map_slot(map->attrMap,
4437 dst_slot,
4438 LocTriggerData.tg_newslot);
4440 else
4441 ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4443 else
4444 LocTriggerData.tg_newslot = dst_slot;
4445 LocTriggerData.tg_newtuple =
4446 ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4448 else
4450 LocTriggerData.tg_newtuple = NULL;
4455 * Set up the tuplestore information to let the trigger have access to
4456 * transition tables. When we first make a transition table available to
4457 * a trigger, mark it "closed" so that it cannot change anymore. If any
4458 * additional events of the same type get queued in the current trigger
4459 * query level, they'll go into new transition tables.
4461 LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4462 if (evtshared->ats_table)
4464 if (LocTriggerData.tg_trigger->tgoldtable)
4466 if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4467 LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4468 else
4469 LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4470 evtshared->ats_table->closed = true;
4473 if (LocTriggerData.tg_trigger->tgnewtable)
4475 if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4476 LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4477 else
4478 LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4479 evtshared->ats_table->closed = true;
4484 * Setup the remaining trigger information
4486 LocTriggerData.type = T_TriggerData;
4487 LocTriggerData.tg_event =
4488 evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4489 LocTriggerData.tg_relation = rel;
4490 if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4491 LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4493 MemoryContextReset(per_tuple_context);
4496 * Call the trigger and throw away any possibly returned updated tuple.
4497 * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4499 rettuple = ExecCallTriggerFunc(&LocTriggerData,
4500 tgindx,
4501 finfo,
4502 NULL,
4503 per_tuple_context);
4504 if (rettuple != NULL &&
4505 rettuple != LocTriggerData.tg_trigtuple &&
4506 rettuple != LocTriggerData.tg_newtuple)
4507 heap_freetuple(rettuple);
4510 * Release resources
4512 if (should_free_trig)
4513 heap_freetuple(LocTriggerData.tg_trigtuple);
4514 if (should_free_new)
4515 heap_freetuple(LocTriggerData.tg_newtuple);
4517 /* don't clear slots' contents if foreign table */
4518 if (trig_tuple_slot1 == NULL)
4520 if (LocTriggerData.tg_trigslot)
4521 ExecClearTuple(LocTriggerData.tg_trigslot);
4522 if (LocTriggerData.tg_newslot)
4523 ExecClearTuple(LocTriggerData.tg_newslot);
4527 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4528 * one "tuple returned" (really the number of firings).
4530 if (instr)
4531 InstrStopNode(instr + tgindx, 1);
4536 * afterTriggerMarkEvents()
4538 * Scan the given event list for not yet invoked events. Mark the ones
4539 * that can be invoked now with the current firing ID.
4541 * If move_list isn't NULL, events that are not to be invoked now are
4542 * transferred to move_list.
4544 * When immediate_only is true, do not invoke currently-deferred triggers.
4545 * (This will be false only at main transaction exit.)
4547 * Returns true if any invokable events were found.
4549 static bool
4550 afterTriggerMarkEvents(AfterTriggerEventList *events,
4551 AfterTriggerEventList *move_list,
4552 bool immediate_only)
4554 bool found = false;
4555 bool deferred_found = false;
4556 AfterTriggerEvent event;
4557 AfterTriggerEventChunk *chunk;
4559 for_each_event_chunk(event, chunk, *events)
4561 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4562 bool defer_it = false;
4564 if (!(event->ate_flags &
4565 (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4568 * This trigger hasn't been called or scheduled yet. Check if we
4569 * should call it now.
4571 if (immediate_only && afterTriggerCheckState(evtshared))
4573 defer_it = true;
4575 else
4578 * Mark it as to be fired in this firing cycle.
4580 evtshared->ats_firing_id = afterTriggers.firing_counter;
4581 event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4582 found = true;
4587 * If it's deferred, move it to move_list, if requested.
4589 if (defer_it && move_list != NULL)
4591 deferred_found = true;
4592 /* add it to move_list */
4593 afterTriggerAddEvent(move_list, event, evtshared);
4594 /* mark original copy "done" so we don't do it again */
4595 event->ate_flags |= AFTER_TRIGGER_DONE;
4600 * We could allow deferred triggers if, before the end of the
4601 * security-restricted operation, we were to verify that a SET CONSTRAINTS
4602 * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4604 if (deferred_found && InSecurityRestrictedOperation())
4605 ereport(ERROR,
4606 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4607 errmsg("cannot fire deferred trigger within security-restricted operation")));
4609 return found;
4613 * afterTriggerInvokeEvents()
4615 * Scan the given event list for events that are marked as to be fired
4616 * in the current firing cycle, and fire them.
4618 * If estate isn't NULL, we use its result relation info to avoid repeated
4619 * openings and closing of trigger target relations. If it is NULL, we
4620 * make one locally to cache the info in case there are multiple trigger
4621 * events per rel.
4623 * When delete_ok is true, it's safe to delete fully-processed events.
4624 * (We are not very tense about that: we simply reset a chunk to be empty
4625 * if all its events got fired. The objective here is just to avoid useless
4626 * rescanning of events when a trigger queues new events during transaction
4627 * end, so it's not necessary to worry much about the case where only
4628 * some events are fired.)
4630 * Returns true if no unfired events remain in the list (this allows us
4631 * to avoid repeating afterTriggerMarkEvents).
4633 static bool
4634 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4635 CommandId firing_id,
4636 EState *estate,
4637 bool delete_ok)
4639 bool all_fired = true;
4640 AfterTriggerEventChunk *chunk;
4641 MemoryContext per_tuple_context;
4642 bool local_estate = false;
4643 ResultRelInfo *rInfo = NULL;
4644 Relation rel = NULL;
4645 TriggerDesc *trigdesc = NULL;
4646 FmgrInfo *finfo = NULL;
4647 Instrumentation *instr = NULL;
4648 TupleTableSlot *slot1 = NULL,
4649 *slot2 = NULL;
4651 /* Make a local EState if need be */
4652 if (estate == NULL)
4654 estate = CreateExecutorState();
4655 local_estate = true;
4658 /* Make a per-tuple memory context for trigger function calls */
4659 per_tuple_context =
4660 AllocSetContextCreate(CurrentMemoryContext,
4661 "AfterTriggerTupleContext",
4662 ALLOCSET_DEFAULT_SIZES);
4664 for_each_chunk(chunk, *events)
4666 AfterTriggerEvent event;
4667 bool all_fired_in_chunk = true;
4669 for_each_event(event, chunk)
4671 AfterTriggerShared evtshared = GetTriggerSharedData(event);
4674 * Is it one for me to fire?
4676 if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4677 evtshared->ats_firing_id == firing_id)
4679 ResultRelInfo *src_rInfo,
4680 *dst_rInfo;
4683 * So let's fire it... but first, find the correct relation if
4684 * this is not the same relation as before.
4686 if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4688 rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4689 NULL);
4690 rel = rInfo->ri_RelationDesc;
4691 /* Catch calls with insufficient relcache refcounting */
4692 Assert(!RelationHasReferenceCountZero(rel));
4693 trigdesc = rInfo->ri_TrigDesc;
4694 /* caution: trigdesc could be NULL here */
4695 finfo = rInfo->ri_TrigFunctions;
4696 instr = rInfo->ri_TrigInstrument;
4697 if (slot1 != NULL)
4699 ExecDropSingleTupleTableSlot(slot1);
4700 ExecDropSingleTupleTableSlot(slot2);
4701 slot1 = slot2 = NULL;
4703 if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4705 slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4706 &TTSOpsMinimalTuple);
4707 slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4708 &TTSOpsMinimalTuple);
4713 * Look up source and destination partition result rels of a
4714 * cross-partition update event.
4716 if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4717 AFTER_TRIGGER_CP_UPDATE)
4719 Assert(OidIsValid(event->ate_src_part) &&
4720 OidIsValid(event->ate_dst_part));
4721 src_rInfo = ExecGetTriggerResultRel(estate,
4722 event->ate_src_part,
4723 rInfo);
4724 dst_rInfo = ExecGetTriggerResultRel(estate,
4725 event->ate_dst_part,
4726 rInfo);
4728 else
4729 src_rInfo = dst_rInfo = rInfo;
4732 * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4733 * still set, so recursive examinations of the event list
4734 * won't try to re-fire it.
4736 AfterTriggerExecute(estate, event, rInfo,
4737 src_rInfo, dst_rInfo,
4738 trigdesc, finfo, instr,
4739 per_tuple_context, slot1, slot2);
4742 * Mark the event as done.
4744 event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4745 event->ate_flags |= AFTER_TRIGGER_DONE;
4747 else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4749 /* something remains to be done */
4750 all_fired = all_fired_in_chunk = false;
4754 /* Clear the chunk if delete_ok and nothing left of interest */
4755 if (delete_ok && all_fired_in_chunk)
4757 chunk->freeptr = CHUNK_DATA_START(chunk);
4758 chunk->endfree = chunk->endptr;
4761 * If it's last chunk, must sync event list's tailfree too. Note
4762 * that delete_ok must NOT be passed as true if there could be
4763 * additional AfterTriggerEventList values pointing at this event
4764 * list, since we'd fail to fix their copies of tailfree.
4766 if (chunk == events->tail)
4767 events->tailfree = chunk->freeptr;
4770 if (slot1 != NULL)
4772 ExecDropSingleTupleTableSlot(slot1);
4773 ExecDropSingleTupleTableSlot(slot2);
4776 /* Release working resources */
4777 MemoryContextDelete(per_tuple_context);
4779 if (local_estate)
4781 ExecCloseResultRelations(estate);
4782 ExecResetTupleTable(estate->es_tupleTable, false);
4783 FreeExecutorState(estate);
4786 return all_fired;
4791 * GetAfterTriggersTableData
4793 * Find or create an AfterTriggersTableData struct for the specified
4794 * trigger event (relation + operation type). Ignore existing structs
4795 * marked "closed"; we don't want to put any additional tuples into them,
4796 * nor change their stmt-triggers-fired state.
4798 * Note: the AfterTriggersTableData list is allocated in the current
4799 * (sub)transaction's CurTransactionContext. This is OK because
4800 * we don't need it to live past AfterTriggerEndQuery.
4802 static AfterTriggersTableData *
4803 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4805 AfterTriggersTableData *table;
4806 AfterTriggersQueryData *qs;
4807 MemoryContext oldcxt;
4808 ListCell *lc;
4810 /* Caller should have ensured query_depth is OK. */
4811 Assert(afterTriggers.query_depth >= 0 &&
4812 afterTriggers.query_depth < afterTriggers.maxquerydepth);
4813 qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4815 foreach(lc, qs->tables)
4817 table = (AfterTriggersTableData *) lfirst(lc);
4818 if (table->relid == relid && table->cmdType == cmdType &&
4819 !table->closed)
4820 return table;
4823 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4825 table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4826 table->relid = relid;
4827 table->cmdType = cmdType;
4828 qs->tables = lappend(qs->tables, table);
4830 MemoryContextSwitchTo(oldcxt);
4832 return table;
4836 * Returns a TupleTableSlot suitable for holding the tuples to be put
4837 * into AfterTriggersTableData's transition table tuplestores.
4839 static TupleTableSlot *
4840 GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4841 TupleDesc tupdesc)
4843 /* Create it if not already done. */
4844 if (!table->storeslot)
4846 MemoryContext oldcxt;
4849 * We need this slot only until AfterTriggerEndQuery, but making it
4850 * last till end-of-subxact is good enough. It'll be freed by
4851 * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4852 * a different lifespan, so we'd better make a copy of that.
4854 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4855 tupdesc = CreateTupleDescCopy(tupdesc);
4856 table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4857 MemoryContextSwitchTo(oldcxt);
4860 return table->storeslot;
4864 * MakeTransitionCaptureState
4866 * Make a TransitionCaptureState object for the given TriggerDesc, target
4867 * relation, and operation type. The TCS object holds all the state needed
4868 * to decide whether to capture tuples in transition tables.
4870 * If there are no triggers in 'trigdesc' that request relevant transition
4871 * tables, then return NULL.
4873 * The resulting object can be passed to the ExecAR* functions. When
4874 * dealing with child tables, the caller can set tcs_original_insert_tuple
4875 * to avoid having to reconstruct the original tuple in the root table's
4876 * format.
4878 * Note that we copy the flags from a parent table into this struct (rather
4879 * than subsequently using the relation's TriggerDesc directly) so that we can
4880 * use it to control collection of transition tuples from child tables.
4882 * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4883 * on the same table during one query should share one transition table.
4884 * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4885 * looked up using the table OID + CmdType, and are merely referenced by
4886 * the TransitionCaptureState objects we hand out to callers.
4888 TransitionCaptureState *
4889 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4891 TransitionCaptureState *state;
4892 bool need_old_upd,
4893 need_new_upd,
4894 need_old_del,
4895 need_new_ins;
4896 AfterTriggersTableData *table;
4897 MemoryContext oldcxt;
4898 ResourceOwner saveResourceOwner;
4900 if (trigdesc == NULL)
4901 return NULL;
4903 /* Detect which table(s) we need. */
4904 switch (cmdType)
4906 case CMD_INSERT:
4907 need_old_upd = need_old_del = need_new_upd = false;
4908 need_new_ins = trigdesc->trig_insert_new_table;
4909 break;
4910 case CMD_UPDATE:
4911 need_old_upd = trigdesc->trig_update_old_table;
4912 need_new_upd = trigdesc->trig_update_new_table;
4913 need_old_del = need_new_ins = false;
4914 break;
4915 case CMD_DELETE:
4916 need_old_del = trigdesc->trig_delete_old_table;
4917 need_old_upd = need_new_upd = need_new_ins = false;
4918 break;
4919 case CMD_MERGE:
4920 need_old_upd = trigdesc->trig_update_old_table;
4921 need_new_upd = trigdesc->trig_update_new_table;
4922 need_old_del = trigdesc->trig_delete_old_table;
4923 need_new_ins = trigdesc->trig_insert_new_table;
4924 break;
4925 default:
4926 elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4927 /* keep compiler quiet */
4928 need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4929 break;
4931 if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4932 return NULL;
4934 /* Check state, like AfterTriggerSaveEvent. */
4935 if (afterTriggers.query_depth < 0)
4936 elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4938 /* Be sure we have enough space to record events at this query depth. */
4939 if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4940 AfterTriggerEnlargeQueryState();
4943 * Find or create an AfterTriggersTableData struct to hold the
4944 * tuplestore(s). If there's a matching struct but it's marked closed,
4945 * ignore it; we need a newer one.
4947 * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4948 * allocated in the current (sub)transaction's CurTransactionContext, and
4949 * the tuplestores are managed by the (sub)transaction's resource owner.
4950 * This is sufficient lifespan because we do not allow triggers using
4951 * transition tables to be deferrable; they will be fired during
4952 * AfterTriggerEndQuery, after which it's okay to delete the data.
4954 table = GetAfterTriggersTableData(relid, cmdType);
4956 /* Now create required tuplestore(s), if we don't have them already. */
4957 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4958 saveResourceOwner = CurrentResourceOwner;
4959 CurrentResourceOwner = CurTransactionResourceOwner;
4961 if (need_old_upd && table->old_upd_tuplestore == NULL)
4962 table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4963 if (need_new_upd && table->new_upd_tuplestore == NULL)
4964 table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4965 if (need_old_del && table->old_del_tuplestore == NULL)
4966 table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4967 if (need_new_ins && table->new_ins_tuplestore == NULL)
4968 table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4970 CurrentResourceOwner = saveResourceOwner;
4971 MemoryContextSwitchTo(oldcxt);
4973 /* Now build the TransitionCaptureState struct, in caller's context */
4974 state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4975 state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4976 state->tcs_update_old_table = trigdesc->trig_update_old_table;
4977 state->tcs_update_new_table = trigdesc->trig_update_new_table;
4978 state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4979 state->tcs_private = table;
4981 return state;
4985 /* ----------
4986 * AfterTriggerBeginXact()
4988 * Called at transaction start (either BEGIN or implicit for single
4989 * statement outside of transaction block).
4990 * ----------
4992 void
4993 AfterTriggerBeginXact(void)
4996 * Initialize after-trigger state structure to empty
4998 afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4999 afterTriggers.query_depth = -1;
5002 * Verify that there is no leftover state remaining. If these assertions
5003 * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
5004 * up properly.
5006 Assert(afterTriggers.state == NULL);
5007 Assert(afterTriggers.query_stack == NULL);
5008 Assert(afterTriggers.maxquerydepth == 0);
5009 Assert(afterTriggers.event_cxt == NULL);
5010 Assert(afterTriggers.events.head == NULL);
5011 Assert(afterTriggers.trans_stack == NULL);
5012 Assert(afterTriggers.maxtransdepth == 0);
5016 /* ----------
5017 * AfterTriggerBeginQuery()
5019 * Called just before we start processing a single query within a
5020 * transaction (or subtransaction). Most of the real work gets deferred
5021 * until somebody actually tries to queue a trigger event.
5022 * ----------
5024 void
5025 AfterTriggerBeginQuery(void)
5027 /* Increase the query stack depth */
5028 afterTriggers.query_depth++;
5032 /* ----------
5033 * AfterTriggerEndQuery()
5035 * Called after one query has been completely processed. At this time
5036 * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5037 * transfer deferred trigger events to the global deferred-trigger list.
5039 * Note that this must be called BEFORE closing down the executor
5040 * with ExecutorEnd, because we make use of the EState's info about
5041 * target relations. Normally it is called from ExecutorFinish.
5042 * ----------
5044 void
5045 AfterTriggerEndQuery(EState *estate)
5047 AfterTriggersQueryData *qs;
5049 /* Must be inside a query, too */
5050 Assert(afterTriggers.query_depth >= 0);
5053 * If we never even got as far as initializing the event stack, there
5054 * certainly won't be any events, so exit quickly.
5056 if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5058 afterTriggers.query_depth--;
5059 return;
5063 * Process all immediate-mode triggers queued by the query, and move the
5064 * deferred ones to the main list of deferred events.
5066 * Notice that we decide which ones will be fired, and put the deferred
5067 * ones on the main list, before anything is actually fired. This ensures
5068 * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5069 * IMMEDIATE: all events we have decided to defer will be available for it
5070 * to fire.
5072 * We loop in case a trigger queues more events at the same query level.
5073 * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5074 * will instead fire any triggers in a dedicated query level. Foreign key
5075 * enforcement triggers do add to the current query level, thanks to their
5076 * passing fire_triggers = false to SPI_execute_snapshot(). Other
5077 * C-language triggers might do likewise.
5079 * If we find no firable events, we don't have to increment
5080 * firing_counter.
5082 qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5084 for (;;)
5086 if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
5088 CommandId firing_id = afterTriggers.firing_counter++;
5089 AfterTriggerEventChunk *oldtail = qs->events.tail;
5091 if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5092 break; /* all fired */
5095 * Firing a trigger could result in query_stack being repalloc'd,
5096 * so we must recalculate qs after each afterTriggerInvokeEvents
5097 * call. Furthermore, it's unsafe to pass delete_ok = true here,
5098 * because that could cause afterTriggerInvokeEvents to try to
5099 * access qs->events after the stack has been repalloc'd.
5101 qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5104 * We'll need to scan the events list again. To reduce the cost
5105 * of doing so, get rid of completely-fired chunks. We know that
5106 * all events were marked IN_PROGRESS or DONE at the conclusion of
5107 * afterTriggerMarkEvents, so any still-interesting events must
5108 * have been added after that, and so must be in the chunk that
5109 * was then the tail chunk, or in later chunks. So, zap all
5110 * chunks before oldtail. This is approximately the same set of
5111 * events we would have gotten rid of by passing delete_ok = true.
5113 Assert(oldtail != NULL);
5114 while (qs->events.head != oldtail)
5115 afterTriggerDeleteHeadEventChunk(qs);
5117 else
5118 break;
5121 /* Release query-level-local storage, including tuplestores if any */
5122 AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5124 afterTriggers.query_depth--;
5129 * AfterTriggerFreeQuery
5130 * Release subsidiary storage for a trigger query level.
5131 * This includes closing down tuplestores.
5132 * Note: it's important for this to be safe if interrupted by an error
5133 * and then called again for the same query level.
5135 static void
5136 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
5138 Tuplestorestate *ts;
5139 List *tables;
5140 ListCell *lc;
5142 /* Drop the trigger events */
5143 afterTriggerFreeEventList(&qs->events);
5145 /* Drop FDW tuplestore if any */
5146 ts = qs->fdw_tuplestore;
5147 qs->fdw_tuplestore = NULL;
5148 if (ts)
5149 tuplestore_end(ts);
5151 /* Release per-table subsidiary storage */
5152 tables = qs->tables;
5153 foreach(lc, tables)
5155 AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
5157 ts = table->old_upd_tuplestore;
5158 table->old_upd_tuplestore = NULL;
5159 if (ts)
5160 tuplestore_end(ts);
5161 ts = table->new_upd_tuplestore;
5162 table->new_upd_tuplestore = NULL;
5163 if (ts)
5164 tuplestore_end(ts);
5165 ts = table->old_del_tuplestore;
5166 table->old_del_tuplestore = NULL;
5167 if (ts)
5168 tuplestore_end(ts);
5169 ts = table->new_ins_tuplestore;
5170 table->new_ins_tuplestore = NULL;
5171 if (ts)
5172 tuplestore_end(ts);
5173 if (table->storeslot)
5175 TupleTableSlot *slot = table->storeslot;
5177 table->storeslot = NULL;
5178 ExecDropSingleTupleTableSlot(slot);
5183 * Now free the AfterTriggersTableData structs and list cells. Reset list
5184 * pointer first; if list_free_deep somehow gets an error, better to leak
5185 * that storage than have an infinite loop.
5187 qs->tables = NIL;
5188 list_free_deep(tables);
5192 /* ----------
5193 * AfterTriggerFireDeferred()
5195 * Called just before the current transaction is committed. At this
5196 * time we invoke all pending DEFERRED triggers.
5198 * It is possible for other modules to queue additional deferred triggers
5199 * during pre-commit processing; therefore xact.c may have to call this
5200 * multiple times.
5201 * ----------
5203 void
5204 AfterTriggerFireDeferred(void)
5206 AfterTriggerEventList *events;
5207 bool snap_pushed = false;
5209 /* Must not be inside a query */
5210 Assert(afterTriggers.query_depth == -1);
5213 * If there are any triggers to fire, make sure we have set a snapshot for
5214 * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5215 * can't assume ActiveSnapshot is valid on entry.)
5217 events = &afterTriggers.events;
5218 if (events->head != NULL)
5220 PushActiveSnapshot(GetTransactionSnapshot());
5221 snap_pushed = true;
5225 * Run all the remaining triggers. Loop until they are all gone, in case
5226 * some trigger queues more for us to do.
5228 while (afterTriggerMarkEvents(events, NULL, false))
5230 CommandId firing_id = afterTriggers.firing_counter++;
5232 if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5233 break; /* all fired */
5237 * We don't bother freeing the event list, since it will go away anyway
5238 * (and more efficiently than via pfree) in AfterTriggerEndXact.
5241 if (snap_pushed)
5242 PopActiveSnapshot();
5246 /* ----------
5247 * AfterTriggerEndXact()
5249 * The current transaction is finishing.
5251 * Any unfired triggers are canceled so we simply throw
5252 * away anything we know.
5254 * Note: it is possible for this to be called repeatedly in case of
5255 * error during transaction abort; therefore, do not complain if
5256 * already closed down.
5257 * ----------
5259 void
5260 AfterTriggerEndXact(bool isCommit)
5263 * Forget the pending-events list.
5265 * Since all the info is in TopTransactionContext or children thereof, we
5266 * don't really need to do anything to reclaim memory. However, the
5267 * pending-events list could be large, and so it's useful to discard it as
5268 * soon as possible --- especially if we are aborting because we ran out
5269 * of memory for the list!
5271 if (afterTriggers.event_cxt)
5273 MemoryContextDelete(afterTriggers.event_cxt);
5274 afterTriggers.event_cxt = NULL;
5275 afterTriggers.events.head = NULL;
5276 afterTriggers.events.tail = NULL;
5277 afterTriggers.events.tailfree = NULL;
5281 * Forget any subtransaction state as well. Since this can't be very
5282 * large, we let the eventual reset of TopTransactionContext free the
5283 * memory instead of doing it here.
5285 afterTriggers.trans_stack = NULL;
5286 afterTriggers.maxtransdepth = 0;
5290 * Forget the query stack and constraint-related state information. As
5291 * with the subtransaction state information, we don't bother freeing the
5292 * memory here.
5294 afterTriggers.query_stack = NULL;
5295 afterTriggers.maxquerydepth = 0;
5296 afterTriggers.state = NULL;
5298 /* No more afterTriggers manipulation until next transaction starts. */
5299 afterTriggers.query_depth = -1;
5303 * AfterTriggerBeginSubXact()
5305 * Start a subtransaction.
5307 void
5308 AfterTriggerBeginSubXact(void)
5310 int my_level = GetCurrentTransactionNestLevel();
5313 * Allocate more space in the trans_stack if needed. (Note: because the
5314 * minimum nest level of a subtransaction is 2, we waste the first couple
5315 * entries of the array; not worth the notational effort to avoid it.)
5317 while (my_level >= afterTriggers.maxtransdepth)
5319 if (afterTriggers.maxtransdepth == 0)
5321 /* Arbitrarily initialize for max of 8 subtransaction levels */
5322 afterTriggers.trans_stack = (AfterTriggersTransData *)
5323 MemoryContextAlloc(TopTransactionContext,
5324 8 * sizeof(AfterTriggersTransData));
5325 afterTriggers.maxtransdepth = 8;
5327 else
5329 /* repalloc will keep the stack in the same context */
5330 int new_alloc = afterTriggers.maxtransdepth * 2;
5332 afterTriggers.trans_stack = (AfterTriggersTransData *)
5333 repalloc(afterTriggers.trans_stack,
5334 new_alloc * sizeof(AfterTriggersTransData));
5335 afterTriggers.maxtransdepth = new_alloc;
5340 * Push the current information into the stack. The SET CONSTRAINTS state
5341 * is not saved until/unless changed. Likewise, we don't make a
5342 * per-subtransaction event context until needed.
5344 afterTriggers.trans_stack[my_level].state = NULL;
5345 afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5346 afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5347 afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
5351 * AfterTriggerEndSubXact()
5353 * The current subtransaction is ending.
5355 void
5356 AfterTriggerEndSubXact(bool isCommit)
5358 int my_level = GetCurrentTransactionNestLevel();
5359 SetConstraintState state;
5360 AfterTriggerEvent event;
5361 AfterTriggerEventChunk *chunk;
5362 CommandId subxact_firing_id;
5365 * Pop the prior state if needed.
5367 if (isCommit)
5369 Assert(my_level < afterTriggers.maxtransdepth);
5370 /* If we saved a prior state, we don't need it anymore */
5371 state = afterTriggers.trans_stack[my_level].state;
5372 if (state != NULL)
5373 pfree(state);
5374 /* this avoids double pfree if error later: */
5375 afterTriggers.trans_stack[my_level].state = NULL;
5376 Assert(afterTriggers.query_depth ==
5377 afterTriggers.trans_stack[my_level].query_depth);
5379 else
5382 * Aborting. It is possible subxact start failed before calling
5383 * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5384 * trans_stack levels that aren't there.
5386 if (my_level >= afterTriggers.maxtransdepth)
5387 return;
5390 * Release query-level storage for queries being aborted, and restore
5391 * query_depth to its pre-subxact value. This assumes that a
5392 * subtransaction will not add events to query levels started in a
5393 * earlier transaction state.
5395 while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5397 if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
5398 AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5399 afterTriggers.query_depth--;
5401 Assert(afterTriggers.query_depth ==
5402 afterTriggers.trans_stack[my_level].query_depth);
5405 * Restore the global deferred-event list to its former length,
5406 * discarding any events queued by the subxact.
5408 afterTriggerRestoreEventList(&afterTriggers.events,
5409 &afterTriggers.trans_stack[my_level].events);
5412 * Restore the trigger state. If the saved state is NULL, then this
5413 * subxact didn't save it, so it doesn't need restoring.
5415 state = afterTriggers.trans_stack[my_level].state;
5416 if (state != NULL)
5418 pfree(afterTriggers.state);
5419 afterTriggers.state = state;
5421 /* this avoids double pfree if error later: */
5422 afterTriggers.trans_stack[my_level].state = NULL;
5425 * Scan for any remaining deferred events that were marked DONE or IN
5426 * PROGRESS by this subxact or a child, and un-mark them. We can
5427 * recognize such events because they have a firing ID greater than or
5428 * equal to the firing_counter value we saved at subtransaction start.
5429 * (This essentially assumes that the current subxact includes all
5430 * subxacts started after it.)
5432 subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5433 for_each_event_chunk(event, chunk, afterTriggers.events)
5435 AfterTriggerShared evtshared = GetTriggerSharedData(event);
5437 if (event->ate_flags &
5438 (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
5440 if (evtshared->ats_firing_id >= subxact_firing_id)
5441 event->ate_flags &=
5442 ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
5449 * Get the transition table for the given event and depending on whether we are
5450 * processing the old or the new tuple.
5452 static Tuplestorestate *
5453 GetAfterTriggersTransitionTable(int event,
5454 TupleTableSlot *oldslot,
5455 TupleTableSlot *newslot,
5456 TransitionCaptureState *transition_capture)
5458 Tuplestorestate *tuplestore = NULL;
5459 bool delete_old_table = transition_capture->tcs_delete_old_table;
5460 bool update_old_table = transition_capture->tcs_update_old_table;
5461 bool update_new_table = transition_capture->tcs_update_new_table;
5462 bool insert_new_table = transition_capture->tcs_insert_new_table;
5465 * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5466 * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5467 * non-NULL. But for UPDATE events fired for capturing transition tuples
5468 * during UPDATE partition-key row movement, OLD is NULL when the event is
5469 * for a row being inserted, whereas NEW is NULL when the event is for a
5470 * row being deleted.
5472 Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5473 TupIsNull(oldslot)));
5474 Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5475 TupIsNull(newslot)));
5477 if (!TupIsNull(oldslot))
5479 Assert(TupIsNull(newslot));
5480 if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5481 tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5482 else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5483 tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5485 else if (!TupIsNull(newslot))
5487 Assert(TupIsNull(oldslot));
5488 if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5489 tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5490 else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5491 tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5494 return tuplestore;
5498 * Add the given heap tuple to the given tuplestore, applying the conversion
5499 * map if necessary.
5501 * If original_insert_tuple is given, we can add that tuple without conversion.
5503 static void
5504 TransitionTableAddTuple(EState *estate,
5505 TransitionCaptureState *transition_capture,
5506 ResultRelInfo *relinfo,
5507 TupleTableSlot *slot,
5508 TupleTableSlot *original_insert_tuple,
5509 Tuplestorestate *tuplestore)
5511 TupleConversionMap *map;
5514 * Nothing needs to be done if we don't have a tuplestore.
5516 if (tuplestore == NULL)
5517 return;
5519 if (original_insert_tuple)
5520 tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5521 else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5523 AfterTriggersTableData *table = transition_capture->tcs_private;
5524 TupleTableSlot *storeslot;
5526 storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5527 execute_attr_map_slot(map->attrMap, slot, storeslot);
5528 tuplestore_puttupleslot(tuplestore, storeslot);
5530 else
5531 tuplestore_puttupleslot(tuplestore, slot);
5534 /* ----------
5535 * AfterTriggerEnlargeQueryState()
5537 * Prepare the necessary state so that we can record AFTER trigger events
5538 * queued by a query. It is allowed to have nested queries within a
5539 * (sub)transaction, so we need to have separate state for each query
5540 * nesting level.
5541 * ----------
5543 static void
5544 AfterTriggerEnlargeQueryState(void)
5546 int init_depth = afterTriggers.maxquerydepth;
5548 Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5550 if (afterTriggers.maxquerydepth == 0)
5552 int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5554 afterTriggers.query_stack = (AfterTriggersQueryData *)
5555 MemoryContextAlloc(TopTransactionContext,
5556 new_alloc * sizeof(AfterTriggersQueryData));
5557 afterTriggers.maxquerydepth = new_alloc;
5559 else
5561 /* repalloc will keep the stack in the same context */
5562 int old_alloc = afterTriggers.maxquerydepth;
5563 int new_alloc = Max(afterTriggers.query_depth + 1,
5564 old_alloc * 2);
5566 afterTriggers.query_stack = (AfterTriggersQueryData *)
5567 repalloc(afterTriggers.query_stack,
5568 new_alloc * sizeof(AfterTriggersQueryData));
5569 afterTriggers.maxquerydepth = new_alloc;
5572 /* Initialize new array entries to empty */
5573 while (init_depth < afterTriggers.maxquerydepth)
5575 AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
5577 qs->events.head = NULL;
5578 qs->events.tail = NULL;
5579 qs->events.tailfree = NULL;
5580 qs->fdw_tuplestore = NULL;
5581 qs->tables = NIL;
5583 ++init_depth;
5588 * Create an empty SetConstraintState with room for numalloc trigstates
5590 static SetConstraintState
5591 SetConstraintStateCreate(int numalloc)
5593 SetConstraintState state;
5595 /* Behave sanely with numalloc == 0 */
5596 if (numalloc <= 0)
5597 numalloc = 1;
5600 * We assume that zeroing will correctly initialize the state values.
5602 state = (SetConstraintState)
5603 MemoryContextAllocZero(TopTransactionContext,
5604 offsetof(SetConstraintStateData, trigstates) +
5605 numalloc * sizeof(SetConstraintTriggerData));
5607 state->numalloc = numalloc;
5609 return state;
5613 * Copy a SetConstraintState
5615 static SetConstraintState
5616 SetConstraintStateCopy(SetConstraintState origstate)
5618 SetConstraintState state;
5620 state = SetConstraintStateCreate(origstate->numstates);
5622 state->all_isset = origstate->all_isset;
5623 state->all_isdeferred = origstate->all_isdeferred;
5624 state->numstates = origstate->numstates;
5625 memcpy(state->trigstates, origstate->trigstates,
5626 origstate->numstates * sizeof(SetConstraintTriggerData));
5628 return state;
5632 * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5633 * pointer to the state object (it will change if we have to repalloc).
5635 static SetConstraintState
5636 SetConstraintStateAddItem(SetConstraintState state,
5637 Oid tgoid, bool tgisdeferred)
5639 if (state->numstates >= state->numalloc)
5641 int newalloc = state->numalloc * 2;
5643 newalloc = Max(newalloc, 8); /* in case original has size 0 */
5644 state = (SetConstraintState)
5645 repalloc(state,
5646 offsetof(SetConstraintStateData, trigstates) +
5647 newalloc * sizeof(SetConstraintTriggerData));
5648 state->numalloc = newalloc;
5649 Assert(state->numstates < state->numalloc);
5652 state->trigstates[state->numstates].sct_tgoid = tgoid;
5653 state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5654 state->numstates++;
5656 return state;
5659 /* ----------
5660 * AfterTriggerSetState()
5662 * Execute the SET CONSTRAINTS ... utility command.
5663 * ----------
5665 void
5666 AfterTriggerSetState(ConstraintsSetStmt *stmt)
5668 int my_level = GetCurrentTransactionNestLevel();
5670 /* If we haven't already done so, initialize our state. */
5671 if (afterTriggers.state == NULL)
5672 afterTriggers.state = SetConstraintStateCreate(8);
5675 * If in a subtransaction, and we didn't save the current state already,
5676 * save it so it can be restored if the subtransaction aborts.
5678 if (my_level > 1 &&
5679 afterTriggers.trans_stack[my_level].state == NULL)
5681 afterTriggers.trans_stack[my_level].state =
5682 SetConstraintStateCopy(afterTriggers.state);
5686 * Handle SET CONSTRAINTS ALL ...
5688 if (stmt->constraints == NIL)
5691 * Forget any previous SET CONSTRAINTS commands in this transaction.
5693 afterTriggers.state->numstates = 0;
5696 * Set the per-transaction ALL state to known.
5698 afterTriggers.state->all_isset = true;
5699 afterTriggers.state->all_isdeferred = stmt->deferred;
5701 else
5703 Relation conrel;
5704 Relation tgrel;
5705 List *conoidlist = NIL;
5706 List *tgoidlist = NIL;
5707 ListCell *lc;
5710 * Handle SET CONSTRAINTS constraint-name [, ...]
5712 * First, identify all the named constraints and make a list of their
5713 * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5714 * the same name within a schema, the specifications are not
5715 * necessarily unique. Our strategy is to target all matching
5716 * constraints within the first search-path schema that has any
5717 * matches, but disregard matches in schemas beyond the first match.
5718 * (This is a bit odd but it's the historical behavior.)
5720 * A constraint in a partitioned table may have corresponding
5721 * constraints in the partitions. Grab those too.
5723 conrel = table_open(ConstraintRelationId, AccessShareLock);
5725 foreach(lc, stmt->constraints)
5727 RangeVar *constraint = lfirst(lc);
5728 bool found;
5729 List *namespacelist;
5730 ListCell *nslc;
5732 if (constraint->catalogname)
5734 if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5735 ereport(ERROR,
5736 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5737 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5738 constraint->catalogname, constraint->schemaname,
5739 constraint->relname)));
5743 * If we're given the schema name with the constraint, look only
5744 * in that schema. If given a bare constraint name, use the
5745 * search path to find the first matching constraint.
5747 if (constraint->schemaname)
5749 Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5750 false);
5752 namespacelist = list_make1_oid(namespaceId);
5754 else
5756 namespacelist = fetch_search_path(true);
5759 found = false;
5760 foreach(nslc, namespacelist)
5762 Oid namespaceId = lfirst_oid(nslc);
5763 SysScanDesc conscan;
5764 ScanKeyData skey[2];
5765 HeapTuple tup;
5767 ScanKeyInit(&skey[0],
5768 Anum_pg_constraint_conname,
5769 BTEqualStrategyNumber, F_NAMEEQ,
5770 CStringGetDatum(constraint->relname));
5771 ScanKeyInit(&skey[1],
5772 Anum_pg_constraint_connamespace,
5773 BTEqualStrategyNumber, F_OIDEQ,
5774 ObjectIdGetDatum(namespaceId));
5776 conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5777 true, NULL, 2, skey);
5779 while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5781 Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5783 if (con->condeferrable)
5784 conoidlist = lappend_oid(conoidlist, con->oid);
5785 else if (stmt->deferred)
5786 ereport(ERROR,
5787 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5788 errmsg("constraint \"%s\" is not deferrable",
5789 constraint->relname)));
5790 found = true;
5793 systable_endscan(conscan);
5796 * Once we've found a matching constraint we do not search
5797 * later parts of the search path.
5799 if (found)
5800 break;
5803 list_free(namespacelist);
5806 * Not found ?
5808 if (!found)
5809 ereport(ERROR,
5810 (errcode(ERRCODE_UNDEFINED_OBJECT),
5811 errmsg("constraint \"%s\" does not exist",
5812 constraint->relname)));
5816 * Scan for any possible descendants of the constraints. We append
5817 * whatever we find to the same list that we're scanning; this has the
5818 * effect that we create new scans for those, too, so if there are
5819 * further descendents, we'll also catch them.
5821 foreach(lc, conoidlist)
5823 Oid parent = lfirst_oid(lc);
5824 ScanKeyData key;
5825 SysScanDesc scan;
5826 HeapTuple tuple;
5828 ScanKeyInit(&key,
5829 Anum_pg_constraint_conparentid,
5830 BTEqualStrategyNumber, F_OIDEQ,
5831 ObjectIdGetDatum(parent));
5833 scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5835 while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5837 Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
5839 conoidlist = lappend_oid(conoidlist, con->oid);
5842 systable_endscan(scan);
5845 table_close(conrel, AccessShareLock);
5848 * Now, locate the trigger(s) implementing each of these constraints,
5849 * and make a list of their OIDs.
5851 tgrel = table_open(TriggerRelationId, AccessShareLock);
5853 foreach(lc, conoidlist)
5855 Oid conoid = lfirst_oid(lc);
5856 ScanKeyData skey;
5857 SysScanDesc tgscan;
5858 HeapTuple htup;
5860 ScanKeyInit(&skey,
5861 Anum_pg_trigger_tgconstraint,
5862 BTEqualStrategyNumber, F_OIDEQ,
5863 ObjectIdGetDatum(conoid));
5865 tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5866 NULL, 1, &skey);
5868 while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5870 Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5873 * Silently skip triggers that are marked as non-deferrable in
5874 * pg_trigger. This is not an error condition, since a
5875 * deferrable RI constraint may have some non-deferrable
5876 * actions.
5878 if (pg_trigger->tgdeferrable)
5879 tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5882 systable_endscan(tgscan);
5885 table_close(tgrel, AccessShareLock);
5888 * Now we can set the trigger states of individual triggers for this
5889 * xact.
5891 foreach(lc, tgoidlist)
5893 Oid tgoid = lfirst_oid(lc);
5894 SetConstraintState state = afterTriggers.state;
5895 bool found = false;
5896 int i;
5898 for (i = 0; i < state->numstates; i++)
5900 if (state->trigstates[i].sct_tgoid == tgoid)
5902 state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5903 found = true;
5904 break;
5907 if (!found)
5909 afterTriggers.state =
5910 SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5916 * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5917 * checks against that constraint must be made when the SET CONSTRAINTS
5918 * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5919 * apply retroactively. We've updated the constraints state, so scan the
5920 * list of previously deferred events to fire any that have now become
5921 * immediate.
5923 * Obviously, if this was SET ... DEFERRED then it can't have converted
5924 * any unfired events to immediate, so we need do nothing in that case.
5926 if (!stmt->deferred)
5928 AfterTriggerEventList *events = &afterTriggers.events;
5929 bool snapshot_set = false;
5931 while (afterTriggerMarkEvents(events, NULL, true))
5933 CommandId firing_id = afterTriggers.firing_counter++;
5936 * Make sure a snapshot has been established in case trigger
5937 * functions need one. Note that we avoid setting a snapshot if
5938 * we don't find at least one trigger that has to be fired now.
5939 * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5940 * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5941 * at the start of a transaction it's not possible for any trigger
5942 * events to be queued yet.)
5944 if (!snapshot_set)
5946 PushActiveSnapshot(GetTransactionSnapshot());
5947 snapshot_set = true;
5951 * We can delete fired events if we are at top transaction level,
5952 * but we'd better not if inside a subtransaction, since the
5953 * subtransaction could later get rolled back.
5955 if (afterTriggerInvokeEvents(events, firing_id, NULL,
5956 !IsSubTransaction()))
5957 break; /* all fired */
5960 if (snapshot_set)
5961 PopActiveSnapshot();
5965 /* ----------
5966 * AfterTriggerPendingOnRel()
5967 * Test to see if there are any pending after-trigger events for rel.
5969 * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5970 * it is unsafe to perform major surgery on a relation. Note that only
5971 * local pending events are examined. We assume that having exclusive lock
5972 * on a rel guarantees there are no unserviced events in other backends ---
5973 * but having a lock does not prevent there being such events in our own.
5975 * In some scenarios it'd be reasonable to remove pending events (more
5976 * specifically, mark them DONE by the current subxact) but without a lot
5977 * of knowledge of the trigger semantics we can't do this in general.
5978 * ----------
5980 bool
5981 AfterTriggerPendingOnRel(Oid relid)
5983 AfterTriggerEvent event;
5984 AfterTriggerEventChunk *chunk;
5985 int depth;
5987 /* Scan queued events */
5988 for_each_event_chunk(event, chunk, afterTriggers.events)
5990 AfterTriggerShared evtshared = GetTriggerSharedData(event);
5993 * We can ignore completed events. (Even if a DONE flag is rolled
5994 * back by subxact abort, it's OK because the effects of the TRUNCATE
5995 * or whatever must get rolled back too.)
5997 if (event->ate_flags & AFTER_TRIGGER_DONE)
5998 continue;
6000 if (evtshared->ats_relid == relid)
6001 return true;
6005 * Also scan events queued by incomplete queries. This could only matter
6006 * if TRUNCATE/etc is executed by a function or trigger within an updating
6007 * query on the same relation, which is pretty perverse, but let's check.
6009 for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
6011 for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
6013 AfterTriggerShared evtshared = GetTriggerSharedData(event);
6015 if (event->ate_flags & AFTER_TRIGGER_DONE)
6016 continue;
6018 if (evtshared->ats_relid == relid)
6019 return true;
6023 return false;
6026 /* ----------
6027 * AfterTriggerSaveEvent()
6029 * Called by ExecA[RS]...Triggers() to queue up the triggers that should
6030 * be fired for an event.
6032 * NOTE: this is called whenever there are any triggers associated with
6033 * the event (even if they are disabled). This function decides which
6034 * triggers actually need to be queued. It is also called after each row,
6035 * even if there are no triggers for that event, if there are any AFTER
6036 * STATEMENT triggers for the statement which use transition tables, so that
6037 * the transition tuplestores can be built. Furthermore, if the transition
6038 * capture is happening for UPDATEd rows being moved to another partition due
6039 * to the partition-key being changed, then this function is called once when
6040 * the row is deleted (to capture OLD row), and once when the row is inserted
6041 * into another partition (to capture NEW row). This is done separately because
6042 * DELETE and INSERT happen on different tables.
6044 * Transition tuplestores are built now, rather than when events are pulled
6045 * off of the queue because AFTER ROW triggers are allowed to select from the
6046 * transition tables for the statement.
6048 * This contains special support to queue the update events for the case where
6049 * a partitioned table undergoing a cross-partition update may have foreign
6050 * keys pointing into it. Normally, a partitioned table's row triggers are
6051 * not fired because the leaf partition(s) which are modified as a result of
6052 * the operation on the partitioned table contain the same triggers which are
6053 * fired instead. But that general scheme can cause problematic behavior with
6054 * foreign key triggers during cross-partition updates, which are implemented
6055 * as DELETE on the source partition followed by INSERT into the destination
6056 * partition. Specifically, firing DELETE triggers would lead to the wrong
6057 * foreign key action to be enforced considering that the original command is
6058 * UPDATE; in this case, this function is called with relinfo as the
6059 * partitioned table, and src_partinfo and dst_partinfo referring to the
6060 * source and target leaf partitions, respectively.
6062 * is_crosspart_update is true either when a DELETE event is fired on the
6063 * source partition (which is to be ignored) or an UPDATE event is fired on
6064 * the root partitioned table.
6065 * ----------
static void
AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
					  ResultRelInfo *src_partinfo,
					  ResultRelInfo *dst_partinfo,
					  int event, bool row_trigger,
					  TupleTableSlot *oldslot, TupleTableSlot *newslot,
					  List *recheckIndexes, Bitmapset *modifiedCols,
					  TransitionCaptureState *transition_capture,
					  bool is_crosspart_update)
{
	Relation	rel = relinfo->ri_RelationDesc;
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	AfterTriggerEventData new_event;
	AfterTriggerSharedData new_shared;
	char		relkind = rel->rd_rel->relkind;
	int			tgtype_event;
	int			tgtype_level;
	int			i;
	Tuplestorestate *fdw_tuplestore = NULL;

	/*
	 * Check state.  We use a normal test not Assert because it is possible to
	 * reach here in the wrong state given misconfigured RI triggers, in
	 * particular deferring a cascade action trigger.
	 */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "AfterTriggerSaveEvent() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();

	/*
	 * If the directly named relation has any triggers with transition tables,
	 * then we need to capture transition tuples.
	 */
	if (row_trigger && transition_capture != NULL)
	{
		TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;

		/*
		 * Capture the old tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(oldslot))
		{
			Tuplestorestate *old_tuplestore;

			old_tuplestore = GetAfterTriggersTransitionTable(event,
															 oldslot,
															 NULL,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									oldslot, NULL, old_tuplestore);
		}

		/*
		 * Capture the new tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(newslot))
		{
			Tuplestorestate *new_tuplestore;

			new_tuplestore = GetAfterTriggersTransitionTable(event,
															 NULL,
															 newslot,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									newslot, original_insert_tuple, new_tuplestore);
		}

		/*
		 * If transition tables are the only reason we're here, return. As
		 * mentioned above, we can also be here during update tuple routing in
		 * presence of transition tables, in which case this function is
		 * called separately for OLD and NEW, so we expect exactly one of them
		 * to be NULL.
		 */
		if (trigdesc == NULL ||
			(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
			(event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
			return;
	}

	/*
	 * We normally don't see partitioned tables here for row level triggers
	 * except in the special case of a cross-partition update.  In that case,
	 * nodeModifyTable.c:ExecCrossPartitionUpdateForeignKey() calls here to
	 * queue an update event on the root target partitioned table, also
	 * passing the source and destination partitions and their tuples.
	 */
	Assert(!row_trigger ||
		   rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
		   (is_crosspart_update &&
			TRIGGER_FIRED_BY_UPDATE(event) &&
			src_partinfo != NULL && dst_partinfo != NULL));

	/*
	 * Validate the event code and collect the associated tuple CTIDs.
	 *
	 * The event code will be used both as a bitmask and an array offset, so
	 * validation is important to make sure we don't walk off the edge of our
	 * arrays.
	 *
	 * Also, if we're considering statement-level triggers, check whether we
	 * already queued a set of them for this event, and cancel the prior set
	 * if so.  This preserves the behavior that statement-level triggers fire
	 * just once per statement and fire after row-level triggers.
	 */
	switch (event)
	{
		case TRIGGER_EVENT_INSERT:
			tgtype_event = TRIGGER_TYPE_INSERT;
			if (row_trigger)
			{
				Assert(oldslot == NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_INSERT, event);
			}
			break;
		case TRIGGER_EVENT_DELETE:
			tgtype_event = TRIGGER_TYPE_DELETE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot == NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_DELETE, event);
			}
			break;
		case TRIGGER_EVENT_UPDATE:
			tgtype_event = TRIGGER_TYPE_UPDATE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));

				/*
				 * Also remember the OIDs of partitions to fetch these tuples
				 * out of later in AfterTriggerExecute().
				 */
				if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
				{
					Assert(src_partinfo != NULL && dst_partinfo != NULL);
					new_event.ate_src_part =
						RelationGetRelid(src_partinfo->ri_RelationDesc);
					new_event.ate_dst_part =
						RelationGetRelid(dst_partinfo->ri_RelationDesc);
				}
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_UPDATE, event);
			}
			break;
		case TRIGGER_EVENT_TRUNCATE:
			tgtype_event = TRIGGER_TYPE_TRUNCATE;
			Assert(oldslot == NULL);
			Assert(newslot == NULL);
			ItemPointerSetInvalid(&(new_event.ate_ctid1));
			ItemPointerSetInvalid(&(new_event.ate_ctid2));
			break;
		default:
			elog(ERROR, "invalid after-trigger event code: %d", event);
			tgtype_event = 0;	/* keep compiler quiet */
			break;
	}

	/*
	 * Determine flags, i.e. how many CTIDs this event carries.  For a
	 * cross-partition update on a partitioned table the event also carries
	 * the source/destination partition OIDs set above.
	 */
	if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
	{
		if (row_trigger && event == TRIGGER_EVENT_UPDATE)
		{
			if (relkind == RELKIND_PARTITIONED_TABLE)
				new_event.ate_flags = AFTER_TRIGGER_CP_UPDATE;
			else
				new_event.ate_flags = AFTER_TRIGGER_2CTID;
		}
		else
			new_event.ate_flags = AFTER_TRIGGER_1CTID;
	}

	/* else, we'll initialize ate_flags for each trigger */

	tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);

	/*
	 * Must convert/copy the source and destination partition tuples into the
	 * root partitioned table's format/slot, because the processing in the
	 * loop below expects both oldslot and newslot tuples to be in that form.
	 */
	if (row_trigger && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
	{
		TupleTableSlot *rootslot;
		TupleConversionMap *map;

		rootslot = ExecGetTriggerOldSlot(estate, relinfo);
		map = ExecGetChildToRootMap(src_partinfo);
		if (map)
			oldslot = execute_attr_map_slot(map->attrMap,
											oldslot,
											rootslot);
		else
			oldslot = ExecCopySlot(rootslot, oldslot);

		rootslot = ExecGetTriggerNewSlot(estate, relinfo);
		map = ExecGetChildToRootMap(dst_partinfo);
		if (map)
			newslot = execute_attr_map_slot(map->attrMap,
											newslot,
											rootslot);
		else
			newslot = ExecCopySlot(rootslot, newslot);
	}

	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  tgtype_level,
								  TRIGGER_TYPE_AFTER,
								  tgtype_event))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, event,
							modifiedCols, oldslot, newslot))
			continue;

		if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
		{
			/*
			 * The first FDW trigger event for this tuple stores the tuples
			 * in the tuplestore (FETCH); later events for the same tuple
			 * just re-read them (REUSE).
			 */
			if (fdw_tuplestore == NULL)
			{
				fdw_tuplestore = GetCurrentFDWTuplestore();
				new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
			}
			else
				/* subsequent event for the same tuple */
				new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
		}

		/*
		 * If the trigger is a foreign key enforcement trigger, there are
		 * certain cases where we can skip queueing the event because we can
		 * tell by inspection that the FK constraint will still pass. There
		 * are also some cases during cross-partition updates of a partitioned
		 * table where queuing the event can be skipped.
		 */
		if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
		{
			switch (RI_FKey_trigger_type(trigger->tgfoid))
			{
				case RI_TRIGGER_PK:

					/*
					 * For cross-partitioned updates of partitioned PK table,
					 * skip the event fired by the component delete on the
					 * source leaf partition unless the constraint originates
					 * in the partition itself (!tgisclone), because the
					 * update event that will be fired on the root
					 * (partitioned) target table will be used to perform the
					 * necessary foreign key enforcement action.
					 */
					if (is_crosspart_update &&
						TRIGGER_FIRED_BY_DELETE(event) &&
						trigger->tgisclone)
						continue;

					/* Update or delete on trigger's PK table */
					if (!RI_FKey_pk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_FK:

					/*
					 * Update on trigger's FK table.  We can skip the update
					 * event fired on a partitioned table during a
					 * cross-partition update of that table, because the
					 * insert event that is fired on the destination leaf
					 * partition would suffice to perform the necessary
					 * foreign key check.  Moreover,
					 * RI_FKey_fk_upd_check_required() expects to be passed a
					 * tuple that contains system attributes, most of which
					 * are not present in the virtual slot belonging to a
					 * partitioned table.
					 */
					if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
						!RI_FKey_fk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_NONE:

					/*
					 * Not an FK trigger.  No need to queue the update event
					 * fired during a cross-partitioned update of a
					 * partitioned table, because the same row trigger must be
					 * present in the leaf partition(s) that are affected as
					 * part of this update and the events fired on them are
					 * queued instead.
					 */
					if (row_trigger &&
						rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
						continue;
					break;
			}
		}

		/*
		 * If the trigger is a deferred unique constraint check trigger, only
		 * queue it if the unique constraint was potentially violated, which
		 * we know from index insertion time.
		 */
		if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
		{
			if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
				continue;		/* Uniqueness definitely not violated */
		}

		/*
		 * Fill in event structure and add it to the current query's queue.
		 * Note we set ats_table to NULL whenever this trigger doesn't use
		 * transition tables, to improve sharability of the shared event data.
		 */
		new_shared.ats_event =
			(event & TRIGGER_EVENT_OPMASK) |
			(row_trigger ? TRIGGER_EVENT_ROW : 0) |
			(trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
			(trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
		new_shared.ats_tgoid = trigger->tgoid;
		new_shared.ats_relid = RelationGetRelid(rel);
		new_shared.ats_firing_id = 0;	/* assigned when the event is fired */
		if ((trigger->tgoldtable || trigger->tgnewtable) &&
			transition_capture != NULL)
			new_shared.ats_table = transition_capture->tcs_private;
		else
			new_shared.ats_table = NULL;
		new_shared.ats_modifiedcols = afterTriggerCopyBitmap(modifiedCols);

		afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
							 &new_event, &new_shared);
	}

	/*
	 * Finally, spool any foreign tuple(s).  The tuplestore squashes them to
	 * minimal tuples, so this loses any system columns.  The executor lost
	 * those columns before us, for an unrelated reason, so this is fine.
	 */
	if (fdw_tuplestore)
	{
		if (oldslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, oldslot);
		if (newslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, newslot);
	}
}
6461 * Detect whether we already queued BEFORE STATEMENT triggers for the given
6462 * relation + operation, and set the flag so the next call will report "true".
6464 static bool
6465 before_stmt_triggers_fired(Oid relid, CmdType cmdType)
6467 bool result;
6468 AfterTriggersTableData *table;
6470 /* Check state, like AfterTriggerSaveEvent. */
6471 if (afterTriggers.query_depth < 0)
6472 elog(ERROR, "before_stmt_triggers_fired() called outside of query");
6474 /* Be sure we have enough space to record events at this query depth. */
6475 if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
6476 AfterTriggerEnlargeQueryState();
6479 * We keep this state in the AfterTriggersTableData that also holds
6480 * transition tables for the relation + operation. In this way, if we are
6481 * forced to make a new set of transition tables because more tuples get
6482 * entered after we've already fired triggers, we will allow a new set of
6483 * statement triggers to get queued.
6485 table = GetAfterTriggersTableData(relid, cmdType);
6486 result = table->before_trig_done;
6487 table->before_trig_done = true;
6488 return result;
/*
 * If we previously queued a set of AFTER STATEMENT triggers for the given
 * relation + operation, and they've not been fired yet, cancel them.  The
 * caller will queue a fresh set that's after any row-level triggers that may
 * have been queued by the current sub-statement, preserving (as much as
 * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
 * triggers, and that the latter only fire once.  This deals with the
 * situation where several FK enforcement triggers sequentially queue triggers
 * for the same table into the same trigger query level.  We can't fully
 * prevent odd behavior though: if there are AFTER ROW triggers taking
 * transition tables, we don't want to change the transition tables once the
 * first such trigger has seen them.  In such a case, any additional events
 * will result in creating new transition tables and allowing new firings of
 * statement triggers.
 *
 * This also saves the current event list location so that a later invocation
 * of this function can cheaply find the triggers we're about to queue and
 * cancel them.
 */
static void
cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
{
	AfterTriggersTableData *table;
	AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	/*
	 * We keep this state in the AfterTriggersTableData that also holds
	 * transition tables for the relation + operation.  In this way, if we are
	 * forced to make a new set of transition tables because more tuples get
	 * entered after we've already fired triggers, we will allow a new set of
	 * statement triggers to get queued without canceling the old ones.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	if (table->after_trig_done)
	{
		/*
		 * We want to start scanning from the tail location that existed just
		 * before we inserted any statement triggers.  But the events list
		 * might've been entirely empty then, in which case scan from the
		 * current head.
		 */
		AfterTriggerEvent event;
		AfterTriggerEventChunk *chunk;

		if (table->after_trig_events.tail)
		{
			chunk = table->after_trig_events.tail;
			event = (AfterTriggerEvent) table->after_trig_events.tailfree;
		}
		else
		{
			chunk = qs->events.head;
			event = NULL;
		}

		for_each_chunk_from(chunk)
		{
			if (event == NULL)
				event = (AfterTriggerEvent) CHUNK_DATA_START(chunk);
			for_each_event_from(event, chunk)
			{
				AfterTriggerShared evtshared = GetTriggerSharedData(event);

				/*
				 * Exit loop when we reach events that aren't AS (AFTER
				 * STATEMENT) triggers for the target relation.
				 */
				if (evtshared->ats_relid != relid)
					goto done;
				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent)
					goto done;
				if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event))
					goto done;
				if (!TRIGGER_FIRED_AFTER(evtshared->ats_event))
					goto done;
				/* OK, mark it DONE; it stays in the list but won't be fired */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			/* signal we must reinitialize event ptr for next chunk */
			event = NULL;
		}
	}
done:

	/* In any case, save current insertion point for next time */
	table->after_trig_done = true;
	table->after_trig_events = qs->events;
}
6583 * GUC assign_hook for session_replication_role
6585 void
6586 assign_session_replication_role(int newval, void *extra)
6589 * Must flush the plan cache when changing replication role; but don't
6590 * flush unnecessarily.
6592 if (SessionReplicationRole != newval)
6593 ResetPlanCache();
6597 * SQL function pg_trigger_depth()
6599 Datum
6600 pg_trigger_depth(PG_FUNCTION_ARGS)
6602 PG_RETURN_INT32(MyTriggerDepth);