/*-------------------------------------------------------------------------
 *
 * trigger.c
 *	  PostgreSQL TRIGGERs support code.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/commands/trigger.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/relation.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/objectaccess.h"
#include "catalog/partition.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "commands/dbcommands.h"
#include "commands/defrem.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/execPartition.h"
#include "miscadmin.h"
#include "nodes/bitmapset.h"
#include "nodes/makefuncs.h"
#include "optimizer/optimizer.h"
#include "parser/parse_clause.h"
#include "parser/parse_collate.h"
#include "parser/parse_func.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/bytea.h"
#include "utils/fmgroids.h"
#include "utils/guc_hooks.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/plancache.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tuplestore.h"

int         SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;

/* How many levels deep into trigger execution are we? */
static int  MyTriggerDepth = 0;
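
/*
 * Illustrative note (not part of the original source): SessionReplicationRole
 * tracks the session_replication_role GUC, which is compared against each
 * trigger's tgenabled flag ('O' origin, 'D' disabled, 'R' replica, 'A' always)
 * when deciding whether the trigger fires.  For example, with a trigger in the
 * default 'O' state:
 *
 *		SET session_replication_role = replica;
 *		UPDATE accounts SET balance = balance + 1;	-- 'O' triggers do not fire
 *
 * Only triggers marked ENABLE REPLICA or ENABLE ALWAYS fire in replica mode;
 * the table name above is hypothetical.
 */
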
/* Local function prototypes */
static void renametrig_internal(Relation tgrel, Relation targetrel,
                                HeapTuple trigtup, const char *newname,
                                const char *expected_name);
static void renametrig_partition(Relation tgrel, Oid partitionId,
                                 Oid parentTriggerOid, const char *newname,
                                 const char *expected_name);
static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
static bool GetTupleForTrigger(EState *estate,
                               EPQState *epqstate,
                               ResultRelInfo *relinfo,
                               ItemPointer tid,
                               LockTupleMode lockmode,
                               TupleTableSlot *oldslot,
                               TupleTableSlot **epqslot,
                               TM_FailureData *tmfdp);
static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
                           Trigger *trigger, TriggerEvent event,
                           Bitmapset *modifiedCols,
                           TupleTableSlot *oldslot, TupleTableSlot *newslot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
                                     int tgindx,
                                     FmgrInfo *finfo,
                                     Instrumentation *instr,
                                     MemoryContext per_tuple_context);
static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
                                  ResultRelInfo *src_partinfo,
                                  ResultRelInfo *dst_partinfo,
                                  int event, bool row_trigger,
                                  TupleTableSlot *oldslot, TupleTableSlot *newslot,
                                  List *recheckIndexes, Bitmapset *modifiedCols,
                                  TransitionCaptureState *transition_capture,
                                  bool is_crosspart_update);
static void AfterTriggerEnlargeQueryState(void);
static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);

/*
 * Create a trigger.  Returns the address of the created trigger.
 *
 * queryString is the source text of the CREATE TRIGGER command.
 * This must be supplied if a whenClause is specified, else it can be NULL.
 *
 * relOid, if nonzero, is the relation on which the trigger should be
 * created.  If zero, the name provided in the statement will be looked up.
 *
 * refRelOid, if nonzero, is the relation to which the constraint trigger
 * refers.  If zero, the constraint relation name provided in the statement
 * will be looked up as needed.
 *
 * constraintOid, if nonzero, says that this trigger is being created
 * internally to implement that constraint.  A suitable pg_depend entry will
 * be made to link the trigger to that constraint.  constraintOid is zero when
 * executing a user-entered CREATE TRIGGER command.  (For CREATE CONSTRAINT
 * TRIGGER, we build a pg_constraint entry internally.)
 *
 * indexOid, if nonzero, is the OID of an index associated with the constraint.
 * We do nothing with this except store it into pg_trigger.tgconstrindid;
 * but when creating a trigger for a deferrable unique constraint on a
 * partitioned table, its children are looked up.  Note we don't cope with
 * invalid indexes in that case.
 *
 * funcoid, if nonzero, is the OID of the function to invoke.  When this is
 * given, stmt->funcname is ignored.
 *
 * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
 * if that trigger is dropped, this one should be too.  There are two cases
 * when a nonzero value is passed for this: 1) when this function recurses to
 * create the trigger on partitions, 2) when creating child foreign key
 * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
 *
 * If whenClause is passed, it is an already-transformed expression for
 * WHEN.  In this case, we ignore any that may come in stmt->whenClause.
 *
 * If isInternal is true then this is an internally-generated trigger.
 * This argument sets the tgisinternal field of the pg_trigger entry, and
 * if true causes us to modify the given trigger name to ensure uniqueness.
 *
 * When isInternal is not true we require ACL_TRIGGER permissions on the
 * relation, as well as ACL_EXECUTE on the trigger function.  For internal
 * triggers the caller must apply any required permission checks.
 *
 * When called on partitioned tables, this function recurses to create the
 * trigger on all the partitions, except if isInternal is true, in which
 * case caller is expected to execute recursion on its own.  in_partition
 * indicates such a recursive call; outside callers should pass "false"
 * (but see CloneRowTriggersToPartition).
 */
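/*
 * Illustrative example (not part of the original source): a plain user-level
 * command that reaches CreateTrigger() through utility processing is, e.g.:
 *
 *		CREATE TRIGGER t_audit
 *			AFTER INSERT OR UPDATE ON accounts
 *			FOR EACH ROW EXECUTE FUNCTION audit_fn();
 *
 * in which case relOid, refRelOid, constraintOid, indexOid, funcoid and
 * parentTriggerOid all arrive as InvalidOid, whenClause is NULL, and
 * isInternal/in_partition are false.  The table and function names are
 * hypothetical.
 */
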
ObjectAddress
CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
              Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
              Oid funcoid, Oid parentTriggerOid, Node *whenClause,
              bool isInternal, bool in_partition)
{
    return
        CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
                              constraintOid, indexOid, funcoid,
                              parentTriggerOid, whenClause, isInternal,
                              in_partition, TRIGGER_FIRES_ON_ORIGIN);
}

/*
 * Like the above; additionally the firing condition
 * (always/origin/replica/disabled) can be specified.
 */
ObjectAddress
CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
                      Oid relOid, Oid refRelOid, Oid constraintOid,
                      Oid indexOid, Oid funcoid, Oid parentTriggerOid,
                      Node *whenClause, bool isInternal, bool in_partition,
                      char trigger_fires_when)
{
    int16       tgtype;
    int         ncolumns;
    int16      *columns;
    int2vector *tgattr;
    List       *whenRtable;
    char       *qual;
    Datum       values[Natts_pg_trigger];
    bool        nulls[Natts_pg_trigger];
    Relation    rel;
    AclResult   aclresult;
    Relation    tgrel;
    Relation    pgrel;
    HeapTuple   tuple = NULL;
    Oid         funcrettype;
    Oid         trigoid = InvalidOid;
    char        internaltrigname[NAMEDATALEN];
    char       *trigname;
    Oid         constrrelid = InvalidOid;
    ObjectAddress myself,
                referenced;
    char       *oldtablename = NULL;
    char       *newtablename = NULL;
    bool        partition_recurse;
    bool        trigger_exists = false;
    Oid         existing_constraint_oid = InvalidOid;
    bool        existing_isInternal = false;
    bool        existing_isClone = false;

    if (OidIsValid(relOid))
        rel = table_open(relOid, ShareRowExclusiveLock);
    else
        rel = table_openrv(stmt->relation, ShareRowExclusiveLock);

    /*
     * Triggers must be on tables or views, and there are additional
     * relation-type-specific restrictions.
     */
    if (rel->rd_rel->relkind == RELKIND_RELATION)
    {
        /* Tables can't have INSTEAD OF triggers */
        if (stmt->timing != TRIGGER_TYPE_BEFORE &&
            stmt->timing != TRIGGER_TYPE_AFTER)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a table",
                            RelationGetRelationName(rel)),
                     errdetail("Tables cannot have INSTEAD OF triggers.")));
    }
    else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
    {
        /* Partitioned tables can't have INSTEAD OF triggers */
        if (stmt->timing != TRIGGER_TYPE_BEFORE &&
            stmt->timing != TRIGGER_TYPE_AFTER)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a table",
                            RelationGetRelationName(rel)),
                     errdetail("Tables cannot have INSTEAD OF triggers.")));

        /*
         * FOR EACH ROW triggers have further restrictions
         */
        if (stmt->row)
        {
            /*
             * Disallow use of transition tables.
             *
             * Note that we have another restriction about transition tables
             * in partitions; search for 'has_superclass' below for an
             * explanation.  The check here is just to protect from the fact
             * that if we allowed it here, the creation would succeed for a
             * partitioned table with no partitions, but would be blocked by
             * the other restriction when the first partition was created,
             * which is very unfriendly behavior.
             */
            if (stmt->transitionRels != NIL)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("\"%s\" is a partitioned table",
                                RelationGetRelationName(rel)),
                         errdetail("Triggers on partitioned tables cannot have transition tables.")));
        }
    }
    else if (rel->rd_rel->relkind == RELKIND_VIEW)
    {
        /*
         * Views can have INSTEAD OF triggers (which we check below are
         * row-level), or statement-level BEFORE/AFTER triggers.
         */
        if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a view",
                            RelationGetRelationName(rel)),
                     errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
        /* Disallow TRUNCATE triggers on VIEWs */
        if (TRIGGER_FOR_TRUNCATE(stmt->events))
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a view",
                            RelationGetRelationName(rel)),
                     errdetail("Views cannot have TRUNCATE triggers.")));
    }
    else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
    {
        if (stmt->timing != TRIGGER_TYPE_BEFORE &&
            stmt->timing != TRIGGER_TYPE_AFTER)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a foreign table",
                            RelationGetRelationName(rel)),
                     errdetail("Foreign tables cannot have INSTEAD OF triggers.")));

        /*
         * We disallow constraint triggers to protect the assumption that
         * triggers on FKs can't be deferred.  See notes with AfterTriggers
         * data structures, below.
         */
        if (stmt->isconstraint)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("\"%s\" is a foreign table",
                            RelationGetRelationName(rel)),
                     errdetail("Foreign tables cannot have constraint triggers.")));
    }
    else
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("relation \"%s\" cannot have triggers",
                        RelationGetRelationName(rel)),
                 errdetail_relkind_not_supported(rel->rd_rel->relkind)));

    if (!allowSystemTableMods && IsSystemRelation(rel))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied: \"%s\" is a system catalog",
                        RelationGetRelationName(rel))));

    if (stmt->isconstraint)
    {
        /*
         * We must take a lock on the target relation to protect against
         * concurrent drop.  It's not clear that AccessShareLock is strong
         * enough, but we certainly need at least that much... otherwise, we
         * might end up creating a pg_constraint entry referencing a
         * nonexistent table.
         */
        if (OidIsValid(refRelOid))
        {
            LockRelationOid(refRelOid, AccessShareLock);
            constrrelid = refRelOid;
        }
        else if (stmt->constrrel != NULL)
            constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
                                           false);
    }

    /* permission checks */
    if (!isInternal)
    {
        aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
                                      ACL_TRIGGER);
        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
                           RelationGetRelationName(rel));

        if (OidIsValid(constrrelid))
        {
            aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
                                          ACL_TRIGGER);
            if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
                               get_rel_name(constrrelid));
        }
    }

    /*
     * When called on a partitioned table to create a FOR EACH ROW trigger
     * that's not internal, we create one trigger for each partition, too.
     *
     * For that, we'd better hold lock on all of them ahead of time.
     */
    partition_recurse = !isInternal && stmt->row &&
        rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
    if (partition_recurse)
        list_free(find_all_inheritors(RelationGetRelid(rel),
                                      ShareRowExclusiveLock, NULL));

    /* Compute tgtype */
    TRIGGER_CLEAR_TYPE(tgtype);
    if (stmt->row)
        TRIGGER_SETT_ROW(tgtype);
    tgtype |= stmt->timing;
    tgtype |= stmt->events;

    /* Disallow ROW-level TRUNCATE triggers */
    if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));

    /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
    if (TRIGGER_FOR_INSTEAD(tgtype))
    {
        if (!TRIGGER_FOR_ROW(tgtype))
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
        if (stmt->whenClause)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
        if (stmt->columns != NIL)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("INSTEAD OF triggers cannot have column lists")));
    }

    /*
     * We don't yet support naming ROW transition variables, but the parser
     * recognizes the syntax so we can give a nicer message here.
     *
     * Per standard, REFERENCING TABLE names are only allowed on AFTER
     * triggers.  Per standard, REFERENCING ROW names are not allowed with FOR
     * EACH STATEMENT.  Per standard, each OLD/NEW, ROW/TABLE permutation is
     * only allowed once.  Per standard, OLD may not be specified when
     * creating a trigger only for INSERT, and NEW may not be specified when
     * creating a trigger only for DELETE.
     *
     * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
     * reference both ROW and TABLE transition data.
     */
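    /*
     * Illustrative example (not part of the original source): the only
     * transition naming accepted below is table-level, e.g.:
     *
     *		CREATE TRIGGER t_sums AFTER UPDATE ON accounts
     *			REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
     *			FOR EACH STATEMENT EXECUTE FUNCTION check_sums();
     *
     * whereas REFERENCING OLD ROW AS ... / NEW ROW AS ... is parsed but
     * rejected here.  The table and function names are hypothetical.
     */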
    if (stmt->transitionRels != NIL)
    {
        List       *varList = stmt->transitionRels;
        ListCell   *lc;

        foreach(lc, varList)
        {
            TriggerTransition *tt = lfirst_node(TriggerTransition, lc);

            if (!(tt->isTable))
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("ROW variable naming in the REFERENCING clause is not supported"),
                         errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));

            /*
             * Because of the above test, we omit further ROW-related testing
             * below.  If we later allow naming OLD and NEW ROW variables,
             * adjustments will be needed below.
             */

            if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is a foreign table",
                                RelationGetRelationName(rel)),
                         errdetail("Triggers on foreign tables cannot have transition tables.")));

            if (rel->rd_rel->relkind == RELKIND_VIEW)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is a view",
                                RelationGetRelationName(rel)),
                         errdetail("Triggers on views cannot have transition tables.")));

            /*
             * We currently don't allow row-level triggers with transition
             * tables on partition or inheritance children.  Such triggers
             * would somehow need to see tuples converted to the format of the
             * table they're attached to, and it's not clear which subset of
             * tuples each child should see.  See also the prohibitions in
             * ATExecAttachPartition() and ATExecAddInherit().
             */
            if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
            {
                /* Use appropriate error message. */
                if (rel->rd_rel->relispartition)
                    ereport(ERROR,
                            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                             errmsg("ROW triggers with transition tables are not supported on partitions")));
                else
                    ereport(ERROR,
                            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                             errmsg("ROW triggers with transition tables are not supported on inheritance children")));
            }

            if (stmt->timing != TRIGGER_TYPE_AFTER)
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                         errmsg("transition table name can only be specified for an AFTER trigger")));

            if (TRIGGER_FOR_TRUNCATE(tgtype))
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("TRUNCATE triggers with transition tables are not supported")));

            /*
             * We currently don't allow multi-event triggers ("INSERT OR
             * UPDATE") with transition tables, because it's not clear how to
             * handle INSERT ... ON CONFLICT statements which can fire both
             * INSERT and UPDATE triggers.  We show the inserted tuples to
             * INSERT triggers and the updated tuples to UPDATE triggers, but
             * it's not yet clear what INSERT OR UPDATE trigger should see.
             * This restriction could be lifted if we can decide on the right
             * semantics in a later release.
             */
            if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
                 (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
                 (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("transition tables cannot be specified for triggers with more than one event")));

            /*
             * We currently don't allow column-specific triggers with
             * transition tables.  Per spec, that seems to require
             * accumulating separate transition tables for each combination of
             * columns, which is a lot of work for a rather marginal feature.
             */
            if (stmt->columns != NIL)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("transition tables cannot be specified for triggers with column lists")));

            /*
             * We disallow constraint triggers with transition tables, to
             * protect the assumption that such triggers can't be deferred.
             * See notes with AfterTriggers data structures, below.
             *
             * Currently this is enforced by the grammar, so just Assert here.
             */
            Assert(!stmt->isconstraint);

            if (tt->isNew)
            {
                if (!(TRIGGER_FOR_INSERT(tgtype) ||
                      TRIGGER_FOR_UPDATE(tgtype)))
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                             errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));

                if (newtablename != NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                             errmsg("NEW TABLE cannot be specified multiple times")));

                newtablename = tt->name;
            }
            else
            {
                if (!(TRIGGER_FOR_DELETE(tgtype) ||
                      TRIGGER_FOR_UPDATE(tgtype)))
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                             errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));

                if (oldtablename != NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                             errmsg("OLD TABLE cannot be specified multiple times")));

                oldtablename = tt->name;
            }
        }

        if (newtablename != NULL && oldtablename != NULL &&
            strcmp(newtablename, oldtablename) == 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                     errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
    }

    /*
     * Parse the WHEN clause, if any and we weren't passed an already
     * transformed one.
     *
     * Note that as a side effect, we fill whenRtable when parsing.  If we got
     * an already parsed clause, this does not occur, which is what we want --
     * no point in adding redundant dependencies below.
     */
    if (!whenClause && stmt->whenClause)
    {
        ParseState *pstate;
        ParseNamespaceItem *nsitem;
        List       *varList;
        ListCell   *lc;

        /* Set up a pstate to parse with */
        pstate = make_parsestate(NULL);
        pstate->p_sourcetext = queryString;

        /*
         * Set up nsitems for OLD and NEW references.
         *
         * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
         */
        nsitem = addRangeTableEntryForRelation(pstate, rel,
                                               AccessShareLock,
                                               makeAlias("old", NIL),
                                               false, false);
        addNSItemToQuery(pstate, nsitem, false, true, true);
        nsitem = addRangeTableEntryForRelation(pstate, rel,
                                               AccessShareLock,
                                               makeAlias("new", NIL),
                                               false, false);
        addNSItemToQuery(pstate, nsitem, false, true, true);

        /* Transform expression.  Copy to be sure we don't modify original */
        whenClause = transformWhereClause(pstate,
                                          copyObject(stmt->whenClause),
                                          EXPR_KIND_TRIGGER_WHEN,
                                          "WHEN");
        /* we have to fix its collations too */
        assign_expr_collations(pstate, whenClause);

        /*
         * Check for disallowed references to OLD/NEW.
         *
         * NB: pull_var_clause is okay here only because we don't allow
         * subselects in WHEN clauses; it would fail to examine the contents
         * of subselects.
         */
        varList = pull_var_clause(whenClause, 0);
        foreach(lc, varList)
        {
            Var        *var = (Var *) lfirst(lc);

            switch (var->varno)
            {
                case PRS2_OLD_VARNO:
                    if (!TRIGGER_FOR_ROW(tgtype))
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("statement trigger's WHEN condition cannot reference column values"),
                                 parser_errposition(pstate, var->location)));
                    if (TRIGGER_FOR_INSERT(tgtype))
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
                                 parser_errposition(pstate, var->location)));
                    /* system columns are okay here */
                    break;
                case PRS2_NEW_VARNO:
                    if (!TRIGGER_FOR_ROW(tgtype))
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("statement trigger's WHEN condition cannot reference column values"),
                                 parser_errposition(pstate, var->location)));
                    if (TRIGGER_FOR_DELETE(tgtype))
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
                                 parser_errposition(pstate, var->location)));
                    if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
                                 parser_errposition(pstate, var->location)));
                    if (TRIGGER_FOR_BEFORE(tgtype) &&
                        var->varattno == 0 &&
                        RelationGetDescr(rel)->constr &&
                        RelationGetDescr(rel)->constr->has_generated_stored)
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
                                 errdetail("A whole-row reference is used and the table contains generated columns."),
                                 parser_errposition(pstate, var->location)));
                    if (TRIGGER_FOR_BEFORE(tgtype) &&
                        var->varattno > 0 &&
                        TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
                        ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                                 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
                                 errdetail("Column \"%s\" is a generated column.",
                                           NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
                                 parser_errposition(pstate, var->location)));
                    break;
                default:
                    /* can't happen without add_missing_from, so just elog */
                    elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
                    break;
            }
        }

        /* we'll need the rtable for recordDependencyOnExpr */
        whenRtable = pstate->p_rtable;

        qual = nodeToString(whenClause);

        free_parsestate(pstate);
    }
    else if (!whenClause)
    {
        whenClause = NULL;
        whenRtable = NIL;
        qual = NULL;
    }
    else
    {
        qual = nodeToString(whenClause);
        whenRtable = NIL;
    }

    /*
     * Find and validate the trigger function.
     */
    if (!OidIsValid(funcoid))
        funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
    if (!isInternal)
    {
        aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, OBJECT_FUNCTION,
                           NameListToString(stmt->funcname));
    }
    funcrettype = get_func_rettype(funcoid);
    if (funcrettype != TRIGGEROID)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                 errmsg("function %s must return type %s",
                        NameListToString(stmt->funcname), "trigger")));

    /*
     * Scan pg_trigger to see if there is already a trigger of the same name.
     * Skip this for internally generated triggers, since we'll modify the
     * name to be unique below.
     *
     * NOTE that this is cool only because we have ShareRowExclusiveLock on
     * the relation, so the trigger set won't be changing underneath us.
     */
    tgrel = table_open(TriggerRelationId, RowExclusiveLock);
    if (!isInternal)
    {
        ScanKeyData skeys[2];
        SysScanDesc tgscan;

        ScanKeyInit(&skeys[0],
                    Anum_pg_trigger_tgrelid,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(RelationGetRelid(rel)));

        ScanKeyInit(&skeys[1],
                    Anum_pg_trigger_tgname,
                    BTEqualStrategyNumber, F_NAMEEQ,
                    CStringGetDatum(stmt->trigname));

        tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                    NULL, 2, skeys);

        /* There should be at most one matching tuple */
        if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
        {
            Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);

            trigoid = oldtrigger->oid;
            existing_constraint_oid = oldtrigger->tgconstraint;
            existing_isInternal = oldtrigger->tgisinternal;
            existing_isClone = OidIsValid(oldtrigger->tgparentid);
            trigger_exists = true;
            /* copy the tuple to use in CatalogTupleUpdate() */
            tuple = heap_copytuple(tuple);
        }
        systable_endscan(tgscan);
    }

    if (!trigger_exists)
    {
        /* Generate the OID for the new trigger. */
        trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
                                     Anum_pg_trigger_oid);
    }
    else
    {
        /*
         * If OR REPLACE was specified, we'll replace the old trigger;
         * otherwise complain about the duplicate name.
         */
        if (!stmt->replace)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_OBJECT),
                     errmsg("trigger \"%s\" for relation \"%s\" already exists",
                            stmt->trigname, RelationGetRelationName(rel))));

        /*
         * An internal trigger or a child trigger (isClone) cannot be replaced
         * by a user-defined trigger.  However, skip this test when
         * in_partition, because then we're recursing from a partitioned table
         * and the check was made at the parent level.
         */
        if ((existing_isInternal || existing_isClone) &&
            !isInternal && !in_partition)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_OBJECT),
                     errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
                            stmt->trigname, RelationGetRelationName(rel))));

        /*
         * It is not allowed to replace with a constraint trigger; gram.y
         * should have enforced this already.
         */
        Assert(!stmt->isconstraint);

        /*
         * It is not allowed to replace an existing constraint trigger,
         * either.  (The reason for these restrictions is partly that it seems
         * difficult to deal with pending trigger events in such cases, and
         * partly that the command might imply changing the constraint's
         * properties as well, which doesn't seem nice.)
         */
        if (OidIsValid(existing_constraint_oid))
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_OBJECT),
                     errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
                            stmt->trigname, RelationGetRelationName(rel))));
    }

    /*
     * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
     * corresponding pg_constraint entry.
     */
    if (stmt->isconstraint && !OidIsValid(constraintOid))
    {
        /* Internal callers should have made their own constraints */
        Assert(!isInternal);
        constraintOid = CreateConstraintEntry(stmt->trigname,
                                              RelationGetNamespace(rel),
                                              InvalidOid,   /* no parent */
                                              RelationGetRelid(rel),
                                              NULL,         /* no conkey */
                                              InvalidOid,   /* no domain */
                                              InvalidOid,   /* no index */
                                              InvalidOid,   /* no foreign key */
                                              NULL,         /* no exclusion */
                                              NULL,         /* no check constraint */
                                              true,         /* noinherit */
                                              isInternal);  /* is_internal */
    }

    /*
     * If trigger is internally generated, modify the provided trigger name to
     * ensure uniqueness by appending the trigger OID.  (Callers will usually
     * supply a simple constant trigger name in these cases.)
     */
    if (isInternal)
    {
        snprintf(internaltrigname, sizeof(internaltrigname),
                 "%s_%u", stmt->trigname, trigoid);
        trigname = internaltrigname;
    }
    else
    {
        /* user-defined trigger; use the specified trigger name as-is */
        trigname = stmt->trigname;
    }

    /*
     * Build the new pg_trigger tuple.
     *
     * When we're creating a trigger in a partition, we mark it as internal,
     * even though we don't do the isInternal magic in this function.  This
     * makes the triggers in partitions identical to the ones in the
     * partitioned tables, except that they are marked internal.
     */
    memset(nulls, false, sizeof(nulls));

    values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
    values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
    values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
    values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
                                                             CStringGetDatum(trigname));
    values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
    values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
    values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
    values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
    values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
    values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
    values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
    values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
    values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);

    if (stmt->args)
    {
        ListCell   *le;
        char       *args;
        int16       nargs = list_length(stmt->args);
        int         len = 0;

        foreach(le, stmt->args)
        {
            char       *ar = strVal(lfirst(le));

            len += strlen(ar) + 4;
            for (; *ar; ar++)
            {
                if (*ar == '\\')
                    len++;
            }
        }
        args = (char *) palloc(len + 1);
        args[0] = '\0';
        foreach(le, stmt->args)
        {
            char       *s = strVal(lfirst(le));
            char       *d = args + strlen(args);

            /* copy the argument, doubling backslashes, and append separator */
            while (*s)
            {
                if (*s == '\\')
                    *d++ = '\\';
                *d++ = *s++;
            }
            strcpy(d, "\\000");
        }
        values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
        values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
                                                                 CStringGetDatum(args));
    }
    else
    {
        values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
        values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
                                                                 CStringGetDatum(""));
    }

    /* build column number array if it's a column-specific trigger */
    ncolumns = list_length(stmt->columns);
    if (ncolumns == 0)
        columns = NULL;
    else
    {
        ListCell   *cell;
        int         i = 0;

        columns = (int16 *) palloc(ncolumns * sizeof(int16));
        foreach(cell, stmt->columns)
        {
            char       *name = strVal(lfirst(cell));
            int16       attnum;
            int         j;

            /* Lookup column name.  System columns are not allowed */
            attnum = attnameAttNum(rel, name, false);
            if (attnum == InvalidAttrNumber)
                ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_COLUMN),
                         errmsg("column \"%s\" of relation \"%s\" does not exist",
                                name, RelationGetRelationName(rel))));

            /* Check for duplicates */
            for (j = i - 1; j >= 0; j--)
            {
                if (columns[j] == attnum)
                    ereport(ERROR,
                            (errcode(ERRCODE_DUPLICATE_COLUMN),
                             errmsg("column \"%s\" specified more than once",
                                    name)));
            }

            columns[i++] = attnum;
        }
    }
    tgattr = buildint2vector(columns, ncolumns);
    values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);

    /* set tgqual if trigger has WHEN clause */
    if (qual)
        values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
    else
        nulls[Anum_pg_trigger_tgqual - 1] = true;

    if (oldtablename)
        values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
                                                                     CStringGetDatum(oldtablename));
    else
        nulls[Anum_pg_trigger_tgoldtable - 1] = true;
    if (newtablename)
        values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
                                                                     CStringGetDatum(newtablename));
    else
        nulls[Anum_pg_trigger_tgnewtable - 1] = true;

    /*
     * Insert or replace tuple in pg_trigger.
     */
    if (!trigger_exists)
    {
        tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
        CatalogTupleInsert(tgrel, tuple);
    }
    else
    {
        HeapTuple   newtup;

        newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
        CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
        heap_freetuple(newtup);
    }

    heap_freetuple(tuple);      /* free either original or new tuple */
    table_close(tgrel, RowExclusiveLock);

    pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
    pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
    pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
    if (oldtablename)
        pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
    if (newtablename)
        pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));

    /*
     * Update relation's pg_class entry if necessary; and if not, send an SI
     * message to make other backends (and this one) rebuild relcache entries.
     */
    pgrel = table_open(RelationRelationId, RowExclusiveLock);
    tuple = SearchSysCacheCopy1(RELOID,
                                ObjectIdGetDatum(RelationGetRelid(rel)));
    if (!HeapTupleIsValid(tuple))
        elog(ERROR, "cache lookup failed for relation %u",
             RelationGetRelid(rel));
    if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
    {
        ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;

        CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);

        CommandCounterIncrement();
    }
    else
        CacheInvalidateRelcacheByTuple(tuple);

    heap_freetuple(tuple);
    table_close(pgrel, RowExclusiveLock);

    /*
     * If we're replacing a trigger, flush all the old dependencies before
     * recording new ones.
     */
    if (trigger_exists)
        deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);

    /*
     * Record dependencies for trigger.  Always place a normal dependency on
     * the function.
     */
    myself.classId = TriggerRelationId;
    myself.objectId = trigoid;
    myself.objectSubId = 0;

    referenced.classId = ProcedureRelationId;
    referenced.objectId = funcoid;
    referenced.objectSubId = 0;
    recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);

    if (isInternal && OidIsValid(constraintOid))
    {
        /*
         * Internally-generated trigger for a constraint, so make it an
         * internal dependency of the constraint.  We can skip depending on
         * the relation(s), as there'll be an indirect dependency via the
         * constraint.
         */
        referenced.classId = ConstraintRelationId;
        referenced.objectId = constraintOid;
        referenced.objectSubId = 0;
        recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
    }
    else
    {
        /*
         * User CREATE TRIGGER, so place dependencies.  We make trigger be
         * auto-dropped if its relation is dropped or if the FK relation is
         * dropped.  (Auto drop is compatible with our pre-7.3 behavior.)
         */
        referenced.classId = RelationRelationId;
        referenced.objectId = RelationGetRelid(rel);
        referenced.objectSubId = 0;
        recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);

        if (OidIsValid(constrrelid))
        {
            referenced.classId = RelationRelationId;
            referenced.objectId = constrrelid;
            referenced.objectSubId = 0;
            recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
        }
        /* Not possible to have an index dependency in this case */
        Assert(!OidIsValid(indexOid));

        /*
         * If it's a user-specified constraint trigger, make the constraint
         * internally dependent on the trigger instead of vice versa.
         */
        if (OidIsValid(constraintOid))
        {
            referenced.classId = ConstraintRelationId;
            referenced.objectId = constraintOid;
            referenced.objectSubId = 0;
            recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
        }

        /*
         * If it's a partition trigger, create the partition dependencies.
         */
        if (OidIsValid(parentTriggerOid))
        {
            ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
            recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
            ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
            recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
        }
    }

    /* If column-specific trigger, add normal dependencies on columns */
    if (columns != NULL)
    {
        int         i;

        referenced.classId = RelationRelationId;
        referenced.objectId = RelationGetRelid(rel);
        for (i = 0; i < ncolumns; i++)
        {
            referenced.objectSubId = columns[i];
            recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
        }
    }

    /*
     * If it has a WHEN clause, add dependencies on objects mentioned in the
     * expression (eg, functions, as well as any columns used).
     */
    if (whenRtable != NIL)
        recordDependencyOnExpr(&myself, whenClause, whenRtable,
                               DEPENDENCY_NORMAL);

    /* Post creation hook for new trigger */
    InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
                                  isInternal);

    /*
     * Lastly, create the trigger on child relations, if needed.
     */
    if (partition_recurse)
    {
        PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
        int         i;
        MemoryContext oldcxt,
                    perChildCxt;

        perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
                                            "part trig clone",
                                            ALLOCSET_SMALL_SIZES);

        /*
         * We don't currently expect to be called with a valid indexOid.  If
         * that ever changes then we'll need to write code here to find the
         * corresponding child index.
         */
        Assert(!OidIsValid(indexOid));

        oldcxt = MemoryContextSwitchTo(perChildCxt);

        /* Iterate to create the trigger on each existing partition */
        for (i = 0; i < partdesc->nparts; i++)
        {
            CreateTrigStmt *childStmt;
            Relation    childTbl;
            Node       *qual;

            childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);

            /*
             * Initialize our fabricated parse node by copying the original
             * one, then resetting fields that we pass separately.
             */
            childStmt = (CreateTrigStmt *) copyObject(stmt);
            childStmt->funcname = NIL;
            childStmt->whenClause = NULL;

            /* If there is a WHEN clause, create a modified copy of it */
            qual = copyObject(whenClause);
            qual = (Node *)
                map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
                                        childTbl, rel);
            qual = (Node *)
                map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
                                        childTbl, rel);

            CreateTriggerFiringOn(childStmt, queryString,
                                  partdesc->oids[i], refRelOid,
                                  InvalidOid, InvalidOid,
                                  funcoid, trigoid, qual,
                                  isInternal, true, trigger_fires_when);

            table_close(childTbl, NoLock);

            MemoryContextReset(perChildCxt);
        }

        MemoryContextSwitchTo(oldcxt);
        MemoryContextDelete(perChildCxt);
    }

    /* Keep lock on target rel until end of xact */
    table_close(rel, NoLock);

    return myself;
}

/*
 * TriggerSetParentTrigger
 *		Set a partition's trigger as child of its parent trigger,
 *		or remove the linkage if parentTrigId is InvalidOid.
 *
 * This updates the trigger's pg_trigger row to show it as inherited, and
 * adds PARTITION dependencies to prevent the trigger from being deleted
 * on its own.  Alternatively, reverse that.
 */
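/*
 * Illustrative note (not part of the original source): one situation that
 * reaches this function is partition detach, e.g.
 *
 *		ALTER TABLE parent DETACH PARTITION child;
 *
 * where the child's cloned triggers are passed in with parentTrigId set to
 * InvalidOid, so the PARTITION dependencies are dropped and the triggers
 * become independent objects on the former partition.
 */
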
void
TriggerSetParentTrigger(Relation trigRel,
                        Oid childTrigId,
                        Oid parentTrigId,
                        Oid childTableId)
{
    SysScanDesc tgscan;
    ScanKeyData skey[1];
    Form_pg_trigger trigForm;
    HeapTuple   tuple,
                newtup;
    ObjectAddress depender;
    ObjectAddress referenced;

    /*
     * Find the trigger to modify.
     */
    ScanKeyInit(&skey[0],
                Anum_pg_trigger_oid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(childTrigId));

    tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
                                NULL, 1, skey);
    tuple = systable_getnext(tgscan);
    if (!HeapTupleIsValid(tuple))
        elog(ERROR, "could not find tuple for trigger %u", childTrigId);
    newtup = heap_copytuple(tuple);
    trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
    if (OidIsValid(parentTrigId))
    {
        /* don't allow setting parent for a trigger that already has one */
        if (OidIsValid(trigForm->tgparentid))
            elog(ERROR, "trigger %u already has a parent trigger",
                 childTrigId);

        trigForm->tgparentid = parentTrigId;

        CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);

        ObjectAddressSet(depender, TriggerRelationId, childTrigId);

        ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
        recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);

        ObjectAddressSet(referenced, RelationRelationId, childTableId);
        recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
    }
    else
    {
        trigForm->tgparentid = InvalidOid;

        CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);

        deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
                                        TriggerRelationId,
                                        DEPENDENCY_PARTITION_PRI);
        deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
                                        RelationRelationId,
                                        DEPENDENCY_PARTITION_SEC);
    }

    heap_freetuple(newtup);
    systable_endscan(tgscan);
}

/*
 * Guts of trigger deletion.
 */
void
RemoveTriggerById(Oid trigOid)
{
    Relation    tgrel;
    SysScanDesc tgscan;
    ScanKeyData skey[1];
    HeapTuple   tup;
    Oid         relid;
    Relation    rel;

    tgrel = table_open(TriggerRelationId, RowExclusiveLock);

    /*
     * Find the trigger to delete.
     */
    ScanKeyInit(&skey[0],
                Anum_pg_trigger_oid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(trigOid));

    tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
                                NULL, 1, skey);

    tup = systable_getnext(tgscan);
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "could not find tuple for trigger %u", trigOid);

    /*
     * Open and exclusive-lock the relation the trigger belongs to.
     */
    relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;

    rel = table_open(relid, AccessExclusiveLock);

    if (rel->rd_rel->relkind != RELKIND_RELATION &&
        rel->rd_rel->relkind != RELKIND_VIEW &&
        rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
        rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("relation \"%s\" cannot have triggers",
                        RelationGetRelationName(rel)),
                 errdetail_relkind_not_supported(rel->rd_rel->relkind)));

    if (!allowSystemTableMods && IsSystemRelation(rel))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied: \"%s\" is a system catalog",
                        RelationGetRelationName(rel))));

    /*
     * Delete the pg_trigger tuple.
     */
    CatalogTupleDelete(tgrel, &tup->t_self);

    systable_endscan(tgscan);
    table_close(tgrel, RowExclusiveLock);

    /*
     * We do not bother to try to determine whether any other triggers remain,
     * which would be needed in order to decide whether it's safe to clear the
     * relation's relhastriggers.  (In any case, there might be a concurrent
     * process adding new triggers.)  Instead, just force a relcache inval to
     * make other backends (and this one too!) rebuild their relcache entries.
     * There's no great harm in leaving relhastriggers true even if there are
     * no triggers left.
     */
    CacheInvalidateRelcache(rel);

    /* Keep lock on trigger's rel until end of xact */
    table_close(rel, NoLock);
}

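/*
 * Illustrative note (not part of the original source): this function is not
 * called directly by DROP TRIGGER; e.g.
 *
 *		DROP TRIGGER t_audit ON accounts;
 *
 * resolves the trigger's object address and reaches here through the
 * dependency-driven deletion machinery, which is also how dropping the table
 * itself removes its triggers.  The names are hypothetical.
 */
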
/*
 * get_trigger_oid - Look up a trigger by name to find its OID.
 *
 * If missing_ok is false, throw an error if trigger not found.  If
 * true, just return InvalidOid.
 */
Oid
get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
{
    Relation    tgrel;
    ScanKeyData skey[2];
    SysScanDesc tgscan;
    HeapTuple   tup;
    Oid         oid;

    /*
     * Find the trigger, verify permissions, set up object address
     */
    tgrel = table_open(TriggerRelationId, AccessShareLock);

    ScanKeyInit(&skey[0],
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(relid));
    ScanKeyInit(&skey[1],
                Anum_pg_trigger_tgname,
                BTEqualStrategyNumber, F_NAMEEQ,
                CStringGetDatum(trigname));

    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, 2, skey);

    tup = systable_getnext(tgscan);

    if (!HeapTupleIsValid(tup))
    {
        if (!missing_ok)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("trigger \"%s\" for table \"%s\" does not exist",
                            trigname, get_rel_name(relid))));
        oid = InvalidOid;
    }
    else
    {
        oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
    }

    systable_endscan(tgscan);
    table_close(tgrel, AccessShareLock);
    return oid;
}

/*
 * Perform permissions and integrity checks before acquiring a relation lock.
 */
static void
RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
                                 void *arg)
{
    HeapTuple   tuple;
    Form_pg_class form;

    tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    if (!HeapTupleIsValid(tuple))
        return;                 /* concurrently dropped */
    form = (Form_pg_class) GETSTRUCT(tuple);

    /* only tables and views can have triggers */
    if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
        form->relkind != RELKIND_FOREIGN_TABLE &&
        form->relkind != RELKIND_PARTITIONED_TABLE)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("relation \"%s\" cannot have triggers",
                        rv->relname),
                 errdetail_relkind_not_supported(form->relkind)));

    /* you must own the table to rename one of its triggers */
    if (!pg_class_ownercheck(relid, GetUserId()))
        aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
    if (!allowSystemTableMods && IsSystemClass(relid, form))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("permission denied: \"%s\" is a system catalog",
                        rv->relname)));

    ReleaseSysCache(tuple);
}

/*
 * renametrig - changes the name of a trigger on a relation
 *
 * trigger name is changed in trigger catalog.
 * No record of the previous name is kept.
 *
 * get proper relrelation from relation catalog (if not arg)
 * scan trigger catalog
 *		for name conflict (within rel)
 *		for original trigger (if not arg)
 * modify tgname in trigger tuple
 * update row in catalog
 */
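/*
 * Illustrative example (not part of the original source): this implements,
 * e.g.:
 *
 *		ALTER TRIGGER t_audit ON accounts RENAME TO t_audit_new;
 *
 * and, on a partitioned table, also renames the cloned triggers on every
 * partition, as described below.  The names are hypothetical.
 */
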
ObjectAddress
renametrig(RenameStmt *stmt)
{
    Oid         tgoid;
    Relation    targetrel;
    Relation    tgrel;
    HeapTuple   tuple;
    SysScanDesc tgscan;
    ScanKeyData key[2];
    Oid         relid;
    ObjectAddress address;

    /*
     * Look up name, check permissions, and acquire lock (which we will NOT
     * release until end of transaction).
     */
    relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
                                     0,
                                     RangeVarCallbackForRenameTrigger,
                                     NULL);

    /* Have lock already, so just need to build relcache entry. */
    targetrel = relation_open(relid, NoLock);

    /*
     * On partitioned tables, this operation recurses to partitions.  Lock all
     * of them upfront, if necessary.
     */
    if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);

    tgrel = table_open(TriggerRelationId, RowExclusiveLock);

    /*
     * Search for the trigger to modify.
     */
    ScanKeyInit(&key[0],
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(relid));
    ScanKeyInit(&key[1],
                Anum_pg_trigger_tgname,
                BTEqualStrategyNumber, F_NAMEEQ,
                PointerGetDatum(stmt->subname));
    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, 2, key);
    if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
    {
        Form_pg_trigger trigform;

        trigform = (Form_pg_trigger) GETSTRUCT(tuple);
        tgoid = trigform->oid;

        /*
         * If the trigger descends from a trigger on a parent partitioned
         * table, reject the rename.  We don't allow a trigger in a partition
         * to differ in name from that of its parent: that would lead to an
         * inconsistency that pg_dump would not reproduce.
         */
        if (OidIsValid(trigform->tgparentid))
            ereport(ERROR,
                    errmsg("cannot rename trigger \"%s\" on table \"%s\"",
                           stmt->subname, RelationGetRelationName(targetrel)),
                    errhint("Rename trigger on partitioned table \"%s\" instead.",
                            get_rel_name(get_partition_parent(relid, false))));

        /* Rename the trigger on this relation ... */
        renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
                            stmt->subname);

        /* ... and if it is partitioned, recurse to its partitions */
        if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
            PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);

            for (int i = 0; i < partdesc->nparts; i++)
            {
                Oid         partitionId = partdesc->oids[i];

                renametrig_partition(tgrel, partitionId, trigform->oid,
                                     stmt->newname, stmt->subname);
            }
        }
    }
    else
    {
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("trigger \"%s\" for table \"%s\" does not exist",
                        stmt->subname, RelationGetRelationName(targetrel))));
    }

    ObjectAddressSet(address, TriggerRelationId, tgoid);

    systable_endscan(tgscan);

    table_close(tgrel, RowExclusiveLock);

    /*
     * Close rel, but keep exclusive lock!
     */
    relation_close(targetrel, NoLock);

    return address;
}

/*
 * Subroutine for renametrig -- perform the actual work of renaming one
 * trigger on one table.
 *
 * If the trigger has a name different from the expected one, raise a
 * NOTICE about it.
 */
static void
renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
                    const char *newname, const char *expected_name)
{
    SysScanDesc tgscan;
    Form_pg_trigger tgform;
    ScanKeyData key[2];
    HeapTuple   tuple;

    /* If the trigger already has the new name, nothing to do. */
    tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
    if (strcmp(NameStr(tgform->tgname), newname) == 0)
        return;

    /*
     * Before actually trying the rename, search for triggers with the same
     * name.  The update would fail with an ugly message in that case, and it
     * is better to throw a nicer error.
     */
    ScanKeyInit(&key[0],
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(RelationGetRelid(targetrel)));
    ScanKeyInit(&key[1],
                Anum_pg_trigger_tgname,
                BTEqualStrategyNumber, F_NAMEEQ,
                PointerGetDatum(newname));
    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, 2, key);
    if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
        ereport(ERROR,
                (errcode(ERRCODE_DUPLICATE_OBJECT),
                 errmsg("trigger \"%s\" for relation \"%s\" already exists",
                        newname, RelationGetRelationName(targetrel))));
    systable_endscan(tgscan);

    /*
     * The target name is free; update the existing pg_trigger tuple with it.
     */
    tuple = heap_copytuple(trigtup);    /* need a modifiable copy */
    tgform = (Form_pg_trigger) GETSTRUCT(tuple);

    /*
     * If the trigger has a name different from what we expected, let the user
     * know.  (We can proceed anyway, since we must have reached here following
     * a tgparentid link.)
     */
    if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
        ereport(NOTICE,
                errmsg("renamed trigger \"%s\" on relation \"%s\"",
                       NameStr(tgform->tgname),
                       RelationGetRelationName(targetrel)));

    namestrcpy(&tgform->tgname, newname);

    CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);

    InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);

    /*
     * Invalidate relation's relcache entry so that other backends (and this
     * one too!) are sent SI message to make them rebuild relcache entries.
     * (Ideally this should happen automatically...)
     */
    CacheInvalidateRelcache(targetrel);
}

/*
 * Subroutine for renametrig -- Helper for recursing to partitions when
 * renaming triggers on a partitioned table.
 */
static void
renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
                     const char *newname, const char *expected_name)
{
    SysScanDesc tgscan;
    ScanKeyData key;
    HeapTuple   tuple;

    /*
     * Given a relation and the OID of a trigger on parent relation, find the
     * corresponding trigger in the child and rename that trigger to the given
     * name.
     */
    ScanKeyInit(&key,
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(partitionId));
    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, 1, &key);
    while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
    {
        Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
        Relation    partitionRel;

        if (tgform->tgparentid != parentTriggerOid)
            continue;           /* not our trigger */

        partitionRel = table_open(partitionId, NoLock);

        /* Rename the trigger on this partition */
        renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);

        /* And if this relation is partitioned, recurse to its partitions */
        if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
            PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
                                                              true);

            for (int i = 0; i < partdesc->nparts; i++)
            {
                Oid         partitionId = partdesc->oids[i];

                renametrig_partition(tgrel, partitionId, tgform->oid, newname,
                                     NameStr(tgform->tgname));
            }
        }

        table_close(partitionRel, NoLock);

        /* There should be at most one matching tuple */
        break;
    }
    systable_endscan(tgscan);
}

/*
 * EnableDisableTrigger()
 *
 *	Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
 *	to change 'tgenabled' field for the specified trigger(s)
 *
 * rel: relation to process (caller must hold suitable lock on it)
 * tgname: trigger to process, or NULL to scan all triggers
 * fires_when: new value for tgenabled field. In addition to generic
 *			   enablement/disablement, this also defines when the trigger
 *			   should be fired in session replication roles.
 * skip_system: if true, skip "system" triggers (constraint triggers)
 * recurse: if true, recurse to partitions
 *
 * Caller should have checked permissions for the table; here we also
 * enforce that superuser privilege is required to alter the state of
 * system triggers
 */
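/*
 * Illustrative example (not part of the original source): statements that end
 * up here include, e.g.:
 *
 *		ALTER TABLE accounts DISABLE TRIGGER t_audit;
 *		ALTER TABLE accounts ENABLE REPLICA TRIGGER t_audit;
 *		ALTER TABLE accounts ENABLE ALWAYS TRIGGER t_audit;
 *
 * mapping to fires_when values of TRIGGER_DISABLED, TRIGGER_FIRES_ON_REPLICA
 * and TRIGGER_FIRES_ALWAYS respectively.  The names are hypothetical.
 */
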
void
EnableDisableTrigger(Relation rel, const char *tgname,
                     char fires_when, bool skip_system, bool recurse,
                     LOCKMODE lockmode)
{
    Relation    tgrel;
    int         nkeys;
    ScanKeyData keys[2];
    SysScanDesc tgscan;
    HeapTuple   tuple;
    bool        found;
    bool        changed;

    /* Scan the relevant entries in pg_triggers */
    tgrel = table_open(TriggerRelationId, RowExclusiveLock);

    ScanKeyInit(&keys[0],
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(RelationGetRelid(rel)));
    if (tgname)
    {
        ScanKeyInit(&keys[1],
                    Anum_pg_trigger_tgname,
                    BTEqualStrategyNumber, F_NAMEEQ,
                    CStringGetDatum(tgname));
        nkeys = 2;
    }
    else
        nkeys = 1;

    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, nkeys, keys);

    found = changed = false;

    while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
    {
        Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);

        if (oldtrig->tgisinternal)
        {
            /* system trigger ... ok to process? */
            if (skip_system)
                continue;
            if (!superuser())
                ereport(ERROR,
                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                         errmsg("permission denied: \"%s\" is a system trigger",
                                NameStr(oldtrig->tgname))));
        }

        found = true;

        if (oldtrig->tgenabled != fires_when)
        {
            /* need to change this one ... make a copy to scribble on */
            HeapTuple   newtup = heap_copytuple(tuple);
            Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);

            newtrig->tgenabled = fires_when;

            CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);

            heap_freetuple(newtup);

            changed = true;
        }

        /*
         * When altering FOR EACH ROW triggers on a partitioned table, do the
         * same on the partitions as well, unless ONLY is specified.
         *
         * Note that we recurse even if we didn't change the trigger above,
         * because the partitions' copy of the trigger may have a different
         * value of tgenabled than the parent's trigger and thus might need to
         * be changed.
         */
        if (recurse &&
            rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
            (TRIGGER_FOR_ROW(oldtrig->tgtype)))
        {
            PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
            int         i;

            for (i = 0; i < partdesc->nparts; i++)
            {
                Relation    part;

                part = relation_open(partdesc->oids[i], lockmode);
                EnableDisableTrigger(part, NameStr(oldtrig->tgname),
                                     fires_when, skip_system, recurse,
                                     lockmode);
                table_close(part, NoLock);  /* keep lock till commit */
            }
        }

        InvokeObjectPostAlterHook(TriggerRelationId,
                                  oldtrig->oid, 0);
    }

    systable_endscan(tgscan);

    table_close(tgrel, RowExclusiveLock);

    if (tgname && !found)
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("trigger \"%s\" for table \"%s\" does not exist",
                        tgname, RelationGetRelationName(rel))));

    /*
     * If we changed anything, broadcast a SI inval message to force each
     * backend (including our own!) to rebuild relation's relcache entry.
     * Otherwise they will fail to apply the change promptly.
     */
    if (changed)
        CacheInvalidateRelcache(rel);
}

/*
 * Build trigger data to attach to the given relcache entry.
 *
 * Note that trigger data attached to a relcache entry must be stored in
 * CacheMemoryContext to ensure it survives as long as the relcache entry.
 * But we should be running in a less long-lived working context.  To avoid
 * leaking cache memory if this routine fails partway through, we build a
 * temporary TriggerDesc in working memory and then copy the completed
 * structure into cache memory.
 */
void
RelationBuildTriggers(Relation relation)
{
    TriggerDesc *trigdesc;
    int         numtrigs;
    int         maxtrigs;
    Trigger    *triggers;
    Relation    tgrel;
    ScanKeyData skey;
    SysScanDesc tgscan;
    HeapTuple   htup;
    MemoryContext oldContext;
    int         i;

    /*
     * Allocate a working array to hold the triggers (the array is extended if
     * necessary)
     */
    maxtrigs = 16;
    triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
    numtrigs = 0;

    /*
     * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
     * be reading the triggers in name order, except possibly during
     * emergency-recovery operations (ie, IgnoreSystemIndexes).  This in turn
     * ensures that triggers will be fired in name order.
     */
    ScanKeyInit(&skey,
                Anum_pg_trigger_tgrelid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(RelationGetRelid(relation)));

    tgrel = table_open(TriggerRelationId, AccessShareLock);
    tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
                                NULL, 1, &skey);

    while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
    {
        Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
        Trigger    *build;
        Datum       datum;
        bool        isnull;

        if (numtrigs >= maxtrigs)
        {
            maxtrigs *= 2;
            triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
        }
        build = &(triggers[numtrigs]);

        build->tgoid = pg_trigger->oid;
        build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
                                                            NameGetDatum(&pg_trigger->tgname)));
        build->tgfoid = pg_trigger->tgfoid;
        build->tgtype = pg_trigger->tgtype;
        build->tgenabled = pg_trigger->tgenabled;
        build->tgisinternal = pg_trigger->tgisinternal;
        build->tgisclone = OidIsValid(pg_trigger->tgparentid);
        build->tgconstrrelid = pg_trigger->tgconstrrelid;
        build->tgconstrindid = pg_trigger->tgconstrindid;
        build->tgconstraint = pg_trigger->tgconstraint;
        build->tgdeferrable = pg_trigger->tgdeferrable;
        build->tginitdeferred = pg_trigger->tginitdeferred;
        build->tgnargs = pg_trigger->tgnargs;
        /* tgattr is first var-width field, so OK to access directly */
        build->tgnattr = pg_trigger->tgattr.dim1;
        if (build->tgnattr > 0)
        {
            build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
            memcpy(build->tgattr, &(pg_trigger->tgattr.values),
                   build->tgnattr * sizeof(int16));
        }
        else
            build->tgattr = NULL;
        if (build->tgnargs > 0)
        {
            bytea      *val;
            char       *p;

            val = DatumGetByteaPP(fastgetattr(htup,
                                              Anum_pg_trigger_tgargs,
                                              tgrel->rd_att, &isnull));
            if (isnull)
                elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
                     RelationGetRelationName(relation));
            p = (char *) VARDATA_ANY(val);
            build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
            for (i = 0; i < build->tgnargs; i++)
            {
                build->tgargs[i] = pstrdup(p);
                p += strlen(p) + 1;
            }
        }
        else
            build->tgargs = NULL;

        datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
                            tgrel->rd_att, &isnull);
        if (!isnull)
            build->tgoldtable =
                DatumGetCString(DirectFunctionCall1(nameout, datum));
        else
            build->tgoldtable = NULL;

        datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
                            tgrel->rd_att, &isnull);
        if (!isnull)
            build->tgnewtable =
                DatumGetCString(DirectFunctionCall1(nameout, datum));
        else
            build->tgnewtable = NULL;

        datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
                            tgrel->rd_att, &isnull);
        if (!isnull)
            build->tgqual = TextDatumGetCString(datum);
        else
            build->tgqual = NULL;

        numtrigs++;
    }

    systable_endscan(tgscan);
    table_close(tgrel, AccessShareLock);

    /* There might not be any triggers */
    if (numtrigs == 0)
    {
        pfree(triggers);
        return;
    }

    /* Build trigdesc */
    trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
    trigdesc->triggers = triggers;
    trigdesc->numtriggers = numtrigs;
    for (i = 0; i < numtrigs; i++)
        SetTriggerFlags(trigdesc, &(triggers[i]));

    /* Copy completed trigdesc into cache storage */
    oldContext = MemoryContextSwitchTo(CacheMemoryContext);
    relation->trigdesc = CopyTriggerDesc(trigdesc);
    MemoryContextSwitchTo(oldContext);

    /* Release working memory */
    FreeTriggerDesc(trigdesc);
}

/*
 * Update the TriggerDesc's hint flags to include the specified trigger
 */
static void
SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
{
    int16       tgtype = trigger->tgtype;

    trigdesc->trig_insert_before_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
    trigdesc->trig_insert_after_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
    trigdesc->trig_insert_instead_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
    trigdesc->trig_insert_before_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
    trigdesc->trig_insert_after_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
    trigdesc->trig_update_before_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
    trigdesc->trig_update_after_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
    trigdesc->trig_update_instead_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
    trigdesc->trig_update_before_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
    trigdesc->trig_update_after_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
    trigdesc->trig_delete_before_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
    trigdesc->trig_delete_after_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
    trigdesc->trig_delete_instead_row |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
                             TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
    trigdesc->trig_delete_before_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
    trigdesc->trig_delete_after_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
    /* there are no row-level truncate triggers */
    trigdesc->trig_truncate_before_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
    trigdesc->trig_truncate_after_statement |=
        TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
                             TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);

    trigdesc->trig_insert_new_table |=
        (TRIGGER_FOR_INSERT(tgtype) &&
         TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
    trigdesc->trig_update_old_table |=
        (TRIGGER_FOR_UPDATE(tgtype) &&
         TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
    trigdesc->trig_update_new_table |=
        (TRIGGER_FOR_UPDATE(tgtype) &&
         TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
    trigdesc->trig_delete_old_table |=
        (TRIGGER_FOR_DELETE(tgtype) &&
         TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
}
/*
 * Copy a TriggerDesc data structure.
 *
 * The copy is allocated in the current memory context.
 */
TriggerDesc *
CopyTriggerDesc(TriggerDesc *trigdesc)
{
    TriggerDesc *newdesc;
    Trigger    *trigger;
    int         i;

    if (trigdesc == NULL || trigdesc->numtriggers <= 0)
        return NULL;

    newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
    memcpy(newdesc, trigdesc, sizeof(TriggerDesc));

    trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
    memcpy(trigger, trigdesc->triggers,
           trigdesc->numtriggers * sizeof(Trigger));
    newdesc->triggers = trigger;

    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        trigger->tgname = pstrdup(trigger->tgname);
        if (trigger->tgnattr > 0)
        {
            int16      *newattr;

            newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
            memcpy(newattr, trigger->tgattr,
                   trigger->tgnattr * sizeof(int16));
            trigger->tgattr = newattr;
        }
        if (trigger->tgnargs > 0)
        {
            char      **newargs;
            int16       j;

            newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
            for (j = 0; j < trigger->tgnargs; j++)
                newargs[j] = pstrdup(trigger->tgargs[j]);
            trigger->tgargs = newargs;
        }
        if (trigger->tgqual)
            trigger->tgqual = pstrdup(trigger->tgqual);
        if (trigger->tgoldtable)
            trigger->tgoldtable = pstrdup(trigger->tgoldtable);
        if (trigger->tgnewtable)
            trigger->tgnewtable = pstrdup(trigger->tgnewtable);
        trigger++;
    }

    return newdesc;
}
/*
 * Free a TriggerDesc data structure.
 */
void
FreeTriggerDesc(TriggerDesc *trigdesc)
{
    Trigger    *trigger;
    int         i;

    if (trigdesc == NULL)
        return;

    trigger = trigdesc->triggers;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        pfree(trigger->tgname);
        if (trigger->tgnattr > 0)
            pfree(trigger->tgattr);
        if (trigger->tgnargs > 0)
        {
            while (--(trigger->tgnargs) >= 0)
                pfree(trigger->tgargs[trigger->tgnargs]);
            pfree(trigger->tgargs);
        }
        if (trigger->tgqual)
            pfree(trigger->tgqual);
        if (trigger->tgoldtable)
            pfree(trigger->tgoldtable);
        if (trigger->tgnewtable)
            pfree(trigger->tgnewtable);
        trigger++;
    }
    pfree(trigdesc->triggers);
    pfree(trigdesc);
}
#ifdef NOT_USED
/*
 * Compare two TriggerDesc structures for logical equality.
 */
static bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
    int         i,
                j;

    /*
     * We need not examine the hint flags, just the trigger array itself; if
     * we have the same triggers with the same types, the flags should match.
     *
     * As of 7.3 we assume trigger set ordering is significant in the
     * comparison; so we just compare corresponding slots of the two sets.
     *
     * Note: comparing the stringToNode forms of the WHEN clauses means that
     * parse column locations will affect the result.  This is okay as long as
     * this function is only used for detecting exact equality, as for example
     * in checking for staleness of a cache entry.
     */
    if (trigdesc1 != NULL)
    {
        if (trigdesc2 == NULL)
            return false;
        if (trigdesc1->numtriggers != trigdesc2->numtriggers)
            return false;
        for (i = 0; i < trigdesc1->numtriggers; i++)
        {
            Trigger    *trig1 = trigdesc1->triggers + i;
            Trigger    *trig2 = trigdesc2->triggers + i;

            if (trig1->tgoid != trig2->tgoid)
                return false;
            if (strcmp(trig1->tgname, trig2->tgname) != 0)
                return false;
            if (trig1->tgfoid != trig2->tgfoid)
                return false;
            if (trig1->tgtype != trig2->tgtype)
                return false;
            if (trig1->tgenabled != trig2->tgenabled)
                return false;
            if (trig1->tgisinternal != trig2->tgisinternal)
                return false;
            if (trig1->tgisclone != trig2->tgisclone)
                return false;
            if (trig1->tgconstrrelid != trig2->tgconstrrelid)
                return false;
            if (trig1->tgconstrindid != trig2->tgconstrindid)
                return false;
            if (trig1->tgconstraint != trig2->tgconstraint)
                return false;
            if (trig1->tgdeferrable != trig2->tgdeferrable)
                return false;
            if (trig1->tginitdeferred != trig2->tginitdeferred)
                return false;
            if (trig1->tgnargs != trig2->tgnargs)
                return false;
            if (trig1->tgnattr != trig2->tgnattr)
                return false;
            if (trig1->tgnattr > 0 &&
                memcmp(trig1->tgattr, trig2->tgattr,
                       trig1->tgnattr * sizeof(int16)) != 0)
                return false;
            for (j = 0; j < trig1->tgnargs; j++)
                if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
                    return false;
            if (trig1->tgqual == NULL && trig2->tgqual == NULL)
                 /* ok */ ;
            else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
                return false;
            else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
                return false;
            if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
                 /* ok */ ;
            else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
                return false;
            else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
                return false;
            if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
                 /* ok */ ;
            else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
                return false;
            else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
                return false;
        }
    }
    else if (trigdesc2 != NULL)
        return false;
    return true;
}
#endif                          /* NOT_USED */
/*
 * Check if there is a row-level trigger with transition tables that prevents
 * a table from becoming an inheritance child or partition.  Return the name
 * of the first such incompatible trigger, or NULL if there is none.
 */
const char *
FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
{
    if (trigdesc != NULL)
    {
        int         i;

        for (i = 0; i < trigdesc->numtriggers; ++i)
        {
            Trigger    *trigger = &trigdesc->triggers[i];

            if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
                return trigger->tgname;
        }
    }

    return NULL;
}
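
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the ALTER TABLE attach-partition / add-inherit code path is expected to use
 * FindTriggerIncompatibleWithInheritance.  The helper name and error wording
 * are assumptions for documentation purposes only.
 */
#ifdef NOT_USED
static void
example_check_no_transition_table_triggers(Relation child_rel)
{
    const char *trigger_name;

    /* Reject the child if any of its row triggers use transition tables */
    trigger_name = FindTriggerIncompatibleWithInheritance(child_rel->trigdesc);
    if (trigger_name != NULL)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("trigger \"%s\" prevents table \"%s\" from becoming an inheritance child",
                        trigger_name, RelationGetRelationName(child_rel))));
}
#endif                          /* NOT_USED */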
/*
 * Call a trigger function.
 *
 * trigdata: trigger descriptor.
 * tgindx: trigger's index in finfo and instr arrays.
 * finfo: array of cached trigger function call information.
 * instr: optional array of EXPLAIN ANALYZE instrumentation state.
 * per_tuple_context: memory context to execute the function in.
 *
 * Returns the tuple (or NULL) as returned by the function.
 */
static HeapTuple
ExecCallTriggerFunc(TriggerData *trigdata,
                    int tgindx,
                    FmgrInfo *finfo,
                    Instrumentation *instr,
                    MemoryContext per_tuple_context)
{
    LOCAL_FCINFO(fcinfo, 0);
    PgStat_FunctionCallUsage fcusage;
    Datum       result;
    MemoryContext oldContext;

    /*
     * Protect against code paths that may fail to initialize transition table
     * info.
     */
    Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
             TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
             TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
            TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
            !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
            !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
           (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));

    finfo += tgindx;

    /*
     * We cache fmgr lookup info, to avoid making the lookup again on each
     * call.
     */
    if (finfo->fn_oid == InvalidOid)
        fmgr_info(trigdata->tg_trigger->tgfoid, finfo);

    Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);

    /*
     * If doing EXPLAIN ANALYZE, start charging time to this trigger.
     */
    if (instr)
        InstrStartNode(instr + tgindx);

    /*
     * Do the function evaluation in the per-tuple memory context, so that
     * leaked memory will be reclaimed once per tuple.  Note in particular that
     * any new tuple created by the trigger function will live till the end of
     * the tuple cycle.
     */
    oldContext = MemoryContextSwitchTo(per_tuple_context);

    /*
     * Call the function, passing no arguments but setting a context.
     */
    InitFunctionCallInfoData(*fcinfo, finfo, 0,
                             InvalidOid, (Node *) trigdata, NULL);

    pgstat_init_function_usage(fcinfo, &fcusage);

    MyTriggerDepth++;
    PG_TRY();
    {
        result = FunctionCallInvoke(fcinfo);
    }
    PG_FINALLY();
    {
        MyTriggerDepth--;
    }
    PG_END_TRY();

    pgstat_end_function_usage(&fcusage, true);

    MemoryContextSwitchTo(oldContext);

    /*
     * Trigger protocol allows function to return a null pointer, but NOT to
     * set the isnull result flag.
     */
    if (fcinfo->isnull)
        ereport(ERROR,
                (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                 errmsg("trigger function %u returned null value",
                        fcinfo->flinfo->fn_oid)));

    /*
     * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
     * one "tuple returned" (really the number of firings).
     */
    if (instr)
        InstrStopNode(instr + tgindx, 1);

    return (HeapTuple) DatumGetPointer(result);
}
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
    TriggerDesc *trigdesc;
    int         i;
    TriggerData LocTriggerData = {0};

    trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc == NULL)
        return;
    if (!trigdesc->trig_insert_before_statement)
        return;

    /* no-op if we already fired BS triggers in this context */
    if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
                                   CMD_INSERT))
        return;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   newtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_STATEMENT,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_INSERT))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, NULL, NULL))
            continue;

        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));

        if (newtuple)
            ereport(ERROR,
                    (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                     errmsg("BEFORE STATEMENT trigger cannot return a value")));
    }
}
void
ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
                     TransitionCaptureState *transition_capture)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc && trigdesc->trig_insert_after_statement)
        AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
                              TRIGGER_EVENT_INSERT,
                              false, NULL, NULL, NIL, NULL, transition_capture,
                              false);
}
bool
ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
                     TupleTableSlot *slot)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    HeapTuple   newtuple = NULL;
    bool        should_free = false;
    TriggerData LocTriggerData = {0};
    int         i;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   oldtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_INSERT))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, NULL, slot))
            continue;

        if (!newtuple)
            newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

        LocTriggerData.tg_trigslot = slot;
        LocTriggerData.tg_trigtuple = oldtuple = newtuple;
        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));
        if (newtuple == NULL)
        {
            if (should_free)
                heap_freetuple(oldtuple);
            return false;       /* "do nothing" */
        }
        else if (newtuple != oldtuple)
        {
            ExecForceStoreHeapTuple(newtuple, slot, false);

            /*
             * After a tuple in a partition goes through a trigger, the user
             * could have changed the partition key enough that the tuple no
             * longer fits the partition.  Verify that.
             */
            if (trigger->tgisclone &&
                !ExecPartitionCheck(relinfo, slot, estate, false))
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
                         errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
                                   trigger->tgname,
                                   get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
                                   RelationGetRelationName(relinfo->ri_RelationDesc))));

            if (should_free)
                heap_freetuple(oldtuple);

            /* signal tuple should be re-fetched if used */
            newtuple = NULL;
        }
    }

    return true;
}
void
ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
                     TupleTableSlot *slot, List *recheckIndexes,
                     TransitionCaptureState *transition_capture)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if ((trigdesc && trigdesc->trig_insert_after_row) ||
        (transition_capture && transition_capture->tcs_insert_new_table))
        AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
                              TRIGGER_EVENT_INSERT,
                              true, NULL, slot,
                              recheckIndexes, NULL,
                              transition_capture,
                              false);
}
bool
ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
                     TupleTableSlot *slot)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    HeapTuple   newtuple = NULL;
    bool        should_free = false;
    TriggerData LocTriggerData = {0};
    int         i;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_INSTEAD;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   oldtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_INSTEAD,
                                  TRIGGER_TYPE_INSERT))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, NULL, slot))
            continue;

        if (!newtuple)
            newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

        LocTriggerData.tg_trigslot = slot;
        LocTriggerData.tg_trigtuple = oldtuple = newtuple;
        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));
        if (newtuple == NULL)
        {
            if (should_free)
                heap_freetuple(oldtuple);
            return false;       /* "do nothing" */
        }
        else if (newtuple != oldtuple)
        {
            ExecForceStoreHeapTuple(newtuple, slot, false);

            if (should_free)
                heap_freetuple(oldtuple);

            /* signal tuple should be re-fetched if used */
            newtuple = NULL;
        }
    }

    return true;
}
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
    TriggerDesc *trigdesc;
    int         i;
    TriggerData LocTriggerData = {0};

    trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc == NULL)
        return;
    if (!trigdesc->trig_delete_before_statement)
        return;

    /* no-op if we already fired BS triggers in this context */
    if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
                                   CMD_DELETE))
        return;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   newtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_STATEMENT,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_DELETE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, NULL, NULL))
            continue;

        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));

        if (newtuple)
            ereport(ERROR,
                    (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                     errmsg("BEFORE STATEMENT trigger cannot return a value")));
    }
}
void
ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
                     TransitionCaptureState *transition_capture)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc && trigdesc->trig_delete_after_statement)
        AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
                              TRIGGER_EVENT_DELETE,
                              false, NULL, NULL, NIL, NULL, transition_capture,
                              false);
}
/*
 * Execute BEFORE ROW DELETE triggers.
 *
 * True indicates caller can proceed with the delete.  False indicates caller
 * need to suppress the delete and additionally if requested, we need to pass
 * back the concurrently updated tuple if any.
 */
bool
ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
                     ResultRelInfo *relinfo,
                     ItemPointer tupleid,
                     HeapTuple fdw_trigtuple,
                     TupleTableSlot **epqslot)
{
    TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    bool        result = true;
    TriggerData LocTriggerData = {0};
    HeapTuple   trigtuple;
    bool        should_free = false;
    int         i;

    Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
    if (fdw_trigtuple == NULL)
    {
        TupleTableSlot *epqslot_candidate = NULL;

        if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
                                LockTupleExclusive, slot, &epqslot_candidate,
                                NULL))
            return false;

        /*
         * If the tuple was concurrently updated and the caller of this
         * function requested for the updated tuple, skip the trigger
         * execution.
         */
        if (epqslot_candidate != NULL && epqslot != NULL)
        {
            *epqslot = epqslot_candidate;
            return false;
        }

        trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
    }
    else
    {
        trigtuple = fdw_trigtuple;
        ExecForceStoreHeapTuple(trigtuple, slot, false);
    }

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        HeapTuple   newtuple;
        Trigger    *trigger = &trigdesc->triggers[i];

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_DELETE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, slot, NULL))
            continue;

        LocTriggerData.tg_trigslot = slot;
        LocTriggerData.tg_trigtuple = trigtuple;
        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));
        if (newtuple == NULL)
        {
            result = false;     /* tell caller to suppress delete */
            break;
        }
        if (newtuple != trigtuple)
            heap_freetuple(newtuple);
    }
    if (should_free)
        heap_freetuple(trigtuple);

    return result;
}
/*
 * Note: is_crosspart_update must be true if the DELETE is being performed
 * as part of a cross-partition update.
 */
void
ExecARDeleteTriggers(EState *estate,
                     ResultRelInfo *relinfo,
                     ItemPointer tupleid,
                     HeapTuple fdw_trigtuple,
                     TransitionCaptureState *transition_capture,
                     bool is_crosspart_update)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if ((trigdesc && trigdesc->trig_delete_after_row) ||
        (transition_capture && transition_capture->tcs_delete_old_table))
    {
        TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);

        Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
        if (fdw_trigtuple == NULL)
            GetTupleForTrigger(estate,
                               NULL,
                               relinfo,
                               tupleid,
                               LockTupleExclusive,
                               slot,
                               NULL,
                               NULL);
        else
            ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);

        AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
                              TRIGGER_EVENT_DELETE,
                              true, slot, NULL, NIL, NULL,
                              transition_capture,
                              is_crosspart_update);
    }
}
bool
ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
                     HeapTuple trigtuple)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
    TriggerData LocTriggerData = {0};
    int         i;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_INSTEAD;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

    ExecForceStoreHeapTuple(trigtuple, slot, false);

    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        HeapTuple   rettuple;
        Trigger    *trigger = &trigdesc->triggers[i];

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_INSTEAD,
                                  TRIGGER_TYPE_DELETE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, slot, NULL))
            continue;

        LocTriggerData.tg_trigslot = slot;
        LocTriggerData.tg_trigtuple = trigtuple;
        LocTriggerData.tg_trigger = trigger;
        rettuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));
        if (rettuple == NULL)
            return false;       /* Delete was suppressed */
        if (rettuple != trigtuple)
            heap_freetuple(rettuple);
    }
    return true;
}
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
    TriggerDesc *trigdesc;
    int         i;
    TriggerData LocTriggerData = {0};
    Bitmapset  *updatedCols;

    trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc == NULL)
        return;
    if (!trigdesc->trig_update_before_statement)
        return;

    /* no-op if we already fired BS triggers in this context */
    if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
                                   CMD_UPDATE))
        return;

    /* statement-level triggers operate on the parent table */
    Assert(relinfo->ri_RootResultRelInfo == NULL);

    updatedCols = ExecGetAllUpdatedCols(relinfo, estate);

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    LocTriggerData.tg_updatedcols = updatedCols;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   newtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_STATEMENT,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_UPDATE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            updatedCols, NULL, NULL))
            continue;

        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));

        if (newtuple)
            ereport(ERROR,
                    (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                     errmsg("BEFORE STATEMENT trigger cannot return a value")));
    }
}
void
ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
                     TransitionCaptureState *transition_capture)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    /* statement-level triggers operate on the parent table */
    Assert(relinfo->ri_RootResultRelInfo == NULL);

    if (trigdesc && trigdesc->trig_update_after_statement)
        AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
                              TRIGGER_EVENT_UPDATE,
                              false, NULL, NULL, NIL,
                              ExecGetAllUpdatedCols(relinfo, estate),
                              transition_capture,
                              false);
}
bool
ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
                     ResultRelInfo *relinfo,
                     ItemPointer tupleid,
                     HeapTuple fdw_trigtuple,
                     TupleTableSlot *newslot,
                     TM_FailureData *tmfd)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
    HeapTuple   newtuple = NULL;
    HeapTuple   trigtuple;
    bool        should_free_trig = false;
    bool        should_free_new = false;
    TriggerData LocTriggerData = {0};
    int         i;
    Bitmapset  *updatedCols;
    LockTupleMode lockmode;

    /* Determine lock mode to use */
    lockmode = ExecUpdateLockMode(estate, relinfo);

    Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
    if (fdw_trigtuple == NULL)
    {
        TupleTableSlot *epqslot_candidate = NULL;

        /* get a copy of the on-disk tuple we are planning to update */
        if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
                                lockmode, oldslot, &epqslot_candidate,
                                tmfd))
            return false;       /* cancel the update action */

        /*
         * In READ COMMITTED isolation level it's possible that target tuple
         * was changed due to concurrent update.  In that case we have a raw
         * subplan output tuple in epqslot_candidate, and need to form a new
         * insertable tuple using ExecGetUpdateNewTuple to replace the one we
         * received in newslot.  Neither we nor our callers have any further
         * interest in the passed-in tuple, so it's okay to overwrite newslot
         * with the newer data.
         *
         * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
         * that epqslot_clean will be that same slot and the copy step below
         * is not needed.)
         */
        if (epqslot_candidate != NULL)
        {
            TupleTableSlot *epqslot_clean;

            epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
                                                  oldslot);

            if (newslot != epqslot_clean)
                ExecCopySlot(newslot, epqslot_clean);
        }

        trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
    }
    else
    {
        ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
        trigtuple = fdw_trigtuple;
    }

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
    updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
    LocTriggerData.tg_updatedcols = updatedCols;
    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   oldtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_UPDATE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            updatedCols, oldslot, newslot))
            continue;

        if (!newtuple)
            newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);

        LocTriggerData.tg_trigslot = oldslot;
        LocTriggerData.tg_trigtuple = trigtuple;
        LocTriggerData.tg_newtuple = oldtuple = newtuple;
        LocTriggerData.tg_newslot = newslot;
        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));

        if (newtuple == NULL)
        {
            if (should_free_trig)
                heap_freetuple(trigtuple);
            if (should_free_new)
                heap_freetuple(oldtuple);
            return false;       /* "do nothing" */
        }
        else if (newtuple != oldtuple)
        {
            ExecForceStoreHeapTuple(newtuple, newslot, false);

            /*
             * If the tuple returned by the trigger / being stored, is the old
             * row version, and the heap tuple passed to the trigger was
             * allocated locally, materialize the slot.  Otherwise we might
             * free it while still referenced by the slot.
             */
            if (should_free_trig && newtuple == trigtuple)
                ExecMaterializeSlot(newslot);

            if (should_free_new)
                heap_freetuple(oldtuple);

            /* signal tuple should be re-fetched if used */
            newtuple = NULL;
        }
    }
    if (should_free_trig)
        heap_freetuple(trigtuple);

    return true;
}
/*
 * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
 * and destination partitions, respectively, of a cross-partition update of
 * the root partitioned table mentioned in the query, given by 'relinfo'.
 * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
 * partition, and 'newslot' contains the "new" tuple in the destination
 * partition.  This interface allows to support the requirements of
 * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
 * that case.
 */
void
ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
                     ResultRelInfo *src_partinfo,
                     ResultRelInfo *dst_partinfo,
                     ItemPointer tupleid,
                     HeapTuple fdw_trigtuple,
                     TupleTableSlot *newslot,
                     List *recheckIndexes,
                     TransitionCaptureState *transition_capture,
                     bool is_crosspart_update)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if ((trigdesc && trigdesc->trig_update_after_row) ||
        (transition_capture &&
         (transition_capture->tcs_update_old_table ||
          transition_capture->tcs_update_new_table)))
    {
        /*
         * Note: if the UPDATE is converted into a DELETE+INSERT as part of
         * update-partition-key operation, then this function is also called
         * separately for DELETE and INSERT to capture transition table rows.
         * In such case, either old tuple or new tuple can be NULL.
         */
        TupleTableSlot *oldslot;
        ResultRelInfo *tupsrc;

        Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
               !is_crosspart_update);

        tupsrc = src_partinfo ? src_partinfo : relinfo;
        oldslot = ExecGetTriggerOldSlot(estate, tupsrc);

        if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
            GetTupleForTrigger(estate,
                               NULL,
                               tupsrc,
                               tupleid,
                               LockTupleExclusive,
                               oldslot,
                               NULL,
                               NULL);
        else if (fdw_trigtuple != NULL)
            ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
        else
            ExecClearTuple(oldslot);

        AfterTriggerSaveEvent(estate, relinfo,
                              src_partinfo, dst_partinfo,
                              TRIGGER_EVENT_UPDATE,
                              true,
                              oldslot, newslot, recheckIndexes,
                              ExecGetAllUpdatedCols(relinfo, estate),
                              transition_capture,
                              is_crosspart_update);
    }
}
bool
ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
                     HeapTuple trigtuple, TupleTableSlot *newslot)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
    TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
    HeapTuple   newtuple = NULL;
    bool        should_free = false;
    TriggerData LocTriggerData = {0};
    int         i;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
        TRIGGER_EVENT_ROW |
        TRIGGER_EVENT_INSTEAD;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

    ExecForceStoreHeapTuple(trigtuple, oldslot, false);

    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   oldtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_ROW,
                                  TRIGGER_TYPE_INSTEAD,
                                  TRIGGER_TYPE_UPDATE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, oldslot, newslot))
            continue;

        if (!newtuple)
            newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);

        LocTriggerData.tg_trigslot = oldslot;
        LocTriggerData.tg_trigtuple = trigtuple;
        LocTriggerData.tg_newslot = newslot;
        LocTriggerData.tg_newtuple = oldtuple = newtuple;

        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));
        if (newtuple == NULL)
        {
            return false;       /* "do nothing" */
        }
        else if (newtuple != oldtuple)
        {
            ExecForceStoreHeapTuple(newtuple, newslot, false);

            if (should_free)
                heap_freetuple(oldtuple);

            /* signal tuple should be re-fetched if used */
            newtuple = NULL;
        }
    }

    return true;
}
void
ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
{
    TriggerDesc *trigdesc;
    int         i;
    TriggerData LocTriggerData = {0};

    trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc == NULL)
        return;
    if (!trigdesc->trig_truncate_before_statement)
        return;

    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
        TRIGGER_EVENT_BEFORE;
    LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

    for (i = 0; i < trigdesc->numtriggers; i++)
    {
        Trigger    *trigger = &trigdesc->triggers[i];
        HeapTuple   newtuple;

        if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
                                  TRIGGER_TYPE_STATEMENT,
                                  TRIGGER_TYPE_BEFORE,
                                  TRIGGER_TYPE_TRUNCATE))
            continue;
        if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
                            NULL, NULL, NULL))
            continue;

        LocTriggerData.tg_trigger = trigger;
        newtuple = ExecCallTriggerFunc(&LocTriggerData,
                                       i,
                                       relinfo->ri_TrigFunctions,
                                       relinfo->ri_TrigInstrument,
                                       GetPerTupleMemoryContext(estate));

        if (newtuple)
            ereport(ERROR,
                    (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                     errmsg("BEFORE STATEMENT trigger cannot return a value")));
    }
}
void
ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    if (trigdesc && trigdesc->trig_truncate_after_statement)
        AfterTriggerSaveEvent(estate, relinfo,
                              NULL, NULL,
                              TRIGGER_EVENT_TRUNCATE,
                              false, NULL, NULL, NIL, NULL, NULL,
                              false);
}
/*
 * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
 */
static bool
GetTupleForTrigger(EState *estate,
                   EPQState *epqstate,
                   ResultRelInfo *relinfo,
                   ItemPointer tid,
                   LockTupleMode lockmode,
                   TupleTableSlot *oldslot,
                   TupleTableSlot **epqslot,
                   TM_FailureData *tmfdp)
{
    Relation    relation = relinfo->ri_RelationDesc;

    if (epqslot != NULL)
    {
        TM_Result   test;
        TM_FailureData tmfd;
        int         lockflags = 0;

        *epqslot = NULL;

        /* caller must pass an epqstate if EvalPlanQual is possible */
        Assert(epqstate != NULL);

        /*
         * lock tuple for update
         */
        if (!IsolationUsesXactSnapshot())
            lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
        test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
                                estate->es_output_cid,
                                lockmode, LockWaitBlock,
                                lockflags,
                                &tmfd);

        /* Let the caller know about the status of this operation */
        if (tmfdp)
            *tmfdp = tmfd;

        switch (test)
        {
            case TM_SelfModified:

                /*
                 * The target tuple was already updated or deleted by the
                 * current command, or by a later command in the current
                 * transaction.  We ignore the tuple in the former case, and
                 * throw error in the latter case, for the same reasons
                 * enumerated in ExecUpdate and ExecDelete in
                 * nodeModifyTable.c.
                 */
                if (tmfd.cmax != estate->es_output_cid)
                    ereport(ERROR,
                            (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
                             errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
                             errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

                /* treat it as deleted; do not process */
                return false;

            case TM_Ok:
                if (tmfd.traversed)
                {
                    *epqslot = EvalPlanQual(epqstate,
                                            relation,
                                            relinfo->ri_RangeTableIndex,
                                            oldslot);

                    /*
                     * If PlanQual failed for updated tuple - we must not
                     * process this tuple!
                     */
                    if (TupIsNull(*epqslot))
                    {
                        *epqslot = NULL;
                        return false;
                    }
                }
                break;

            case TM_Updated:
                if (IsolationUsesXactSnapshot())
                    ereport(ERROR,
                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                             errmsg("could not serialize access due to concurrent update")));
                elog(ERROR, "unexpected table_tuple_lock status: %u", test);
                break;

            case TM_Deleted:
                if (IsolationUsesXactSnapshot())
                    ereport(ERROR,
                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                             errmsg("could not serialize access due to concurrent delete")));
                /* tuple was deleted */
                return false;

            case TM_Invisible:
                elog(ERROR, "attempted to lock invisible tuple");
                break;

            default:
                elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
                return false;   /* keep compiler quiet */
        }
    }
    else
    {
        /*
         * We expect the tuple to be present, thus very simple error handling
         * suffices.
         */
        if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
                                           oldslot))
            elog(ERROR, "failed to fetch tuple for trigger");
    }

    return true;
}
/*
 * Is trigger enabled to fire?
 */
static bool
TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
               Trigger *trigger, TriggerEvent event,
               Bitmapset *modifiedCols,
               TupleTableSlot *oldslot, TupleTableSlot *newslot)
{
    /* Check replication-role-dependent enable state */
    if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
    {
        if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
            trigger->tgenabled == TRIGGER_DISABLED)
            return false;
    }
    else                        /* ORIGIN or LOCAL role */
    {
        if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
            trigger->tgenabled == TRIGGER_DISABLED)
            return false;
    }

    /*
     * Check for column-specific trigger (only possible for UPDATE, and in
     * fact we *must* ignore tgattr for other event types)
     */
    if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
    {
        int         i;
        bool        modified;

        modified = false;
        for (i = 0; i < trigger->tgnattr; i++)
        {
            if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
                              modifiedCols))
            {
                modified = true;
                break;
            }
        }
        if (!modified)
            return false;
    }

    /* Check for WHEN clause */
    if (trigger->tgqual)
    {
        ExprState **predicate;
        ExprContext *econtext;
        MemoryContext oldContext;
        int         i;

        Assert(estate != NULL);

        /*
         * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
         * matching element of relinfo->ri_TrigWhenExprs[]
         */
        i = trigger - relinfo->ri_TrigDesc->triggers;
        predicate = &relinfo->ri_TrigWhenExprs[i];

        /*
         * If first time through for this WHEN expression, build expression
         * nodetrees for it.  Keep them in the per-query memory context so
         * they'll survive throughout the query.
         */
        if (*predicate == NULL)
        {
            Node       *tgqual;

            oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
            tgqual = stringToNode(trigger->tgqual);
            /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
            ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
            ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
            /* ExecPrepareQual wants implicit-AND form */
            tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
            *predicate = ExecPrepareQual((List *) tgqual, estate);
            MemoryContextSwitchTo(oldContext);
        }

        /*
         * We will use the EState's per-tuple context for evaluating WHEN
         * expressions (creating it if it's not already there).
         */
        econtext = GetPerTupleExprContext(estate);

        /*
         * Finally evaluate the expression, making the old and/or new tuples
         * available as INNER_VAR/OUTER_VAR respectively.
         */
        econtext->ecxt_innertuple = oldslot;
        econtext->ecxt_outertuple = newslot;
        if (!ExecQual(*predicate, econtext))
            return false;
    }

    return true;
}
/* ----------
 * After-trigger stuff
 *
 * The AfterTriggersData struct holds data about pending AFTER trigger events
 * during the current transaction tree.  (BEFORE triggers are fired
 * immediately so we don't need any persistent state about them.)  The struct
 * and most of its subsidiary data are kept in TopTransactionContext; however
 * some data that can be discarded sooner appears in the CurTransactionContext
 * of the relevant subtransaction.  Also, the individual event records are
 * kept in a separate sub-context of TopTransactionContext.  This is done
 * mainly so that it's easy to tell from a memory context dump how much space
 * is being eaten by trigger events.
 *
 * Because the list of pending events can grow large, we go to some
 * considerable effort to minimize per-event memory consumption.  The event
 * records are grouped into chunks and common data for similar events in the
 * same chunk is only stored once.
 *
 * XXX We need to be able to save the per-event data in a file if it grows
 * too large.
 * ----------
 */
/* Per-trigger SET CONSTRAINT status */
typedef struct SetConstraintTriggerData
{
    Oid         sct_tgoid;
    bool        sct_tgisdeferred;
} SetConstraintTriggerData;

typedef struct SetConstraintTriggerData *SetConstraintTrigger;

/*
 * SET CONSTRAINT intra-transaction status.
 *
 * We make this a single palloc'd object so it can be copied and freed easily.
 *
 * all_isset and all_isdeferred are used to keep track
 * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
 *
 * trigstates[] stores per-trigger tgisdeferred settings.
 */
typedef struct SetConstraintStateData
{
    bool        all_isset;
    bool        all_isdeferred;
    int         numstates;      /* number of trigstates[] entries in use */
    int         numalloc;       /* allocated size of trigstates[] */
    SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
} SetConstraintStateData;

typedef SetConstraintStateData *SetConstraintState;
/*
 * Per-trigger-event data
 *
 * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
 * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
 * Each event record also has an associated AfterTriggerSharedData that is
 * shared across all instances of similar events within a "chunk".
 *
 * For row-level triggers, we arrange not to waste storage on unneeded ctid
 * fields.  Updates of regular tables use two; inserts and deletes of regular
 * tables use one; foreign tables always use zero and save the tuple(s) to a
 * tuplestore.  AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
 * retrieve a fresh tuple or pair of tuples from that tuplestore, while
 * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
 * tuple(s).  This permits storing tuples once regardless of the number of
 * row-level triggers on a foreign table.
 *
 * When updates on partitioned tables cause rows to move between partitions,
 * the OIDs of both partitions are stored too, so that the tuples can be
 * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
 * partition update").
 *
 * Note that we need triggers on foreign tables to be fired in exactly the
 * order they were queued, so that the tuples come out of the tuplestore in
 * the right order.  To ensure that, we forbid deferrable (constraint)
 * triggers on foreign tables.  This also ensures that such triggers do not
 * get deferred into outer trigger query levels, meaning that it's okay to
 * destroy the tuplestore at the end of the query level.
 *
 * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
 * require no ctid field.  We lack the flag bit space to neatly represent that
 * distinct case, and it seems unlikely to be worth much trouble.
 *
 * Note: ats_firing_id is initially zero and is set to something else when
 * AFTER_TRIGGER_IN_PROGRESS is set.  It indicates which trigger firing
 * cycle the trigger will be fired in (or was fired in, if DONE is set).
 * Although this is mutable state, we can keep it in AfterTriggerSharedData
 * because all instances of the same type of event in a given event list will
 * be fired at the same time, if they were queued between the same firing
 * cycles.  So we need only ensure that ats_firing_id is zero when attaching
 * a new event to an existing AfterTriggerSharedData record.
 */
typedef uint32 TriggerFlags;

#define AFTER_TRIGGER_OFFSET			0x07FFFFFF	/* must be low-order bits */
#define AFTER_TRIGGER_DONE				0x80000000
#define AFTER_TRIGGER_IN_PROGRESS		0x40000000
/* bits describing the size and tuple sources of this event */
#define AFTER_TRIGGER_FDW_REUSE			0x00000000
#define AFTER_TRIGGER_FDW_FETCH			0x20000000
#define AFTER_TRIGGER_1CTID				0x10000000
#define AFTER_TRIGGER_2CTID				0x30000000
#define AFTER_TRIGGER_CP_UPDATE			0x08000000
#define AFTER_TRIGGER_TUP_BITS			0x38000000
typedef struct AfterTriggerSharedData *AfterTriggerShared;

typedef struct AfterTriggerSharedData
{
    TriggerEvent ats_event;     /* event type indicator, see trigger.h */
    Oid         ats_tgoid;      /* the trigger's ID */
    Oid         ats_relid;      /* the relation it's on */
    CommandId   ats_firing_id;  /* ID for firing cycle */
    struct AfterTriggersTableData *ats_table;   /* transition table access */
    Bitmapset  *ats_modifiedcols;   /* modified columns */
} AfterTriggerSharedData;

typedef struct AfterTriggerEventData *AfterTriggerEvent;

typedef struct AfterTriggerEventData
{
    TriggerFlags ate_flags;     /* status bits and offset to shared data */
    ItemPointerData ate_ctid1;  /* inserted, deleted, or old updated tuple */
    ItemPointerData ate_ctid2;  /* new updated tuple */

    /*
     * During a cross-partition update of a partitioned table, we also store
     * the OIDs of source and destination partitions that are needed to fetch
     * the old (ctid1) and the new tuple (ctid2) from, respectively.
     */
    Oid         ate_src_part;
    Oid         ate_dst_part;
} AfterTriggerEventData;

/* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
typedef struct AfterTriggerEventDataNoOids
{
    TriggerFlags ate_flags;
    ItemPointerData ate_ctid1;
    ItemPointerData ate_ctid2;
} AfterTriggerEventDataNoOids;

/* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
typedef struct AfterTriggerEventDataOneCtid
{
    TriggerFlags ate_flags;     /* status bits and offset to shared data */
    ItemPointerData ate_ctid1;  /* inserted, deleted, or old updated tuple */
} AfterTriggerEventDataOneCtid;

/* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
typedef struct AfterTriggerEventDataZeroCtids
{
    TriggerFlags ate_flags;     /* status bits and offset to shared data */
} AfterTriggerEventDataZeroCtids;

#define SizeofTriggerEvent(evt) \
	(((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
	 sizeof(AfterTriggerEventData) : \
	 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
	  sizeof(AfterTriggerEventDataNoOids) : \
	  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
	   sizeof(AfterTriggerEventDataOneCtid) : \
	   sizeof(AfterTriggerEventDataZeroCtids))))

#define GetTriggerSharedData(evt) \
	((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
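
/*
 * Illustrative sketch (not in the original source): how the two macros above
 * combine when examining raw event storage.  The function name is
 * hypothetical; it only demonstrates that an event's total size is derived
 * from its tuple-source bits while its shared record is found through the
 * offset bits of ate_flags.
 */
#ifdef NOT_USED
static void
example_inspect_event(AfterTriggerEvent event)
{
    Size        event_size = SizeofTriggerEvent(event);
    AfterTriggerShared evtshared = GetTriggerSharedData(event);

    elog(DEBUG1, "event of %zu bytes fires trigger %u on relation %u",
         event_size, evtshared->ats_tgoid, evtshared->ats_relid);
}
#endif                          /* NOT_USED */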
/*
 * To avoid palloc overhead, we keep trigger events in arrays in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  The space between CHUNK_DATA_START and freeptr is occupied by
 * AfterTriggerEventData records; the space between endfree and endptr is
 * occupied by AfterTriggerSharedData records.
 */
typedef struct AfterTriggerEventChunk
{
    struct AfterTriggerEventChunk *next;    /* list link */
    char       *freeptr;        /* start of free space in chunk */
    char       *endfree;        /* end of free space in chunk */
    char       *endptr;         /* end of chunk */
    /* event data follows here */
} AfterTriggerEventChunk;

#define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))

/* A list of events */
typedef struct AfterTriggerEventList
{
    AfterTriggerEventChunk *head;
    AfterTriggerEventChunk *tail;
    char       *tailfree;       /* freeptr of tail chunk */
} AfterTriggerEventList;

/* Macros to help in iterating over a list of events */
#define for_each_chunk(cptr, evtlist) \
	for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
#define for_each_event(eptr, cptr) \
	for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
/* Use this if no special per-chunk processing is needed */
#define for_each_event_chunk(eptr, cptr, evtlist) \
	for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)

/* Macros for iterating from a start point that might not be list start */
#define for_each_chunk_from(cptr) \
	for (; cptr != NULL; cptr = cptr->next)
#define for_each_event_from(eptr, cptr) \
	for (; \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
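
/*
 * Illustrative sketch (not in the original source): typical use of the
 * iteration macros above.  The function is hypothetical; it simply counts
 * the events in a list that have not yet been marked AFTER_TRIGGER_DONE.
 */
#ifdef NOT_USED
static int
example_count_pending_events(AfterTriggerEventList *events)
{
    AfterTriggerEventChunk *chunk;
    AfterTriggerEvent event;
    int         npending = 0;

    /* Walk every event in every chunk of the list */
    for_each_event_chunk(event, chunk, *events)
    {
        if ((event->ate_flags & AFTER_TRIGGER_DONE) == 0)
            npending++;
    }
    return npending;
}
#endif                          /* NOT_USED */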
/*
 * All per-transaction data for the AFTER TRIGGERS module.
 *
 * AfterTriggersData has the following fields:
 *
 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
 * We mark firable events with the current firing cycle's ID so that we can
 * tell which ones to work on.  This ensures sane behavior if a trigger
 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
 * only fire those events that weren't already scheduled for firing.
 *
 * state keeps track of the transaction-local effects of SET CONSTRAINTS.
 * This is saved and restored across failed subtransactions.
 *
 * events is the current list of deferred events.  This is global across
 * all subtransactions of the current transaction.  In a subtransaction
 * abort, we know that the events added by the subtransaction are at the
 * end of the list, so it is relatively easy to discard them.  The event
 * list chunks themselves are stored in event_cxt.
 *
 * query_depth is the current depth of nested AfterTriggerBeginQuery calls
 * (-1 when the stack is empty).
 *
 * query_stack[query_depth] is the per-query-level data, including these
 * fields:
 *
 * events is a list of AFTER trigger events queued by the current query.
 * None of these are valid until the matching AfterTriggerEndQuery call
 * occurs.  At that point we fire immediate-mode triggers, and append any
 * deferred events to the main events list.
 *
 * fdw_tuplestore is a tuplestore containing the foreign-table tuples
 * needed by events queued by the current query.  (Note: we use just one
 * tuplestore even though more than one foreign table might be involved.
 * This is okay because tuplestores don't really care what's in the tuples
 * they store; but it's possible that someday it'd break.)
 *
 * tables is a List of AfterTriggersTableData structs for target tables
 * of the current query (see below).
 *
 * maxquerydepth is just the allocated length of query_stack.
 *
 * trans_stack holds per-subtransaction data, including these fields:
 *
 * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
 * state data.  Each subtransaction level that modifies that state first
 * saves a copy, which we use to restore the state if we abort.
 *
 * events is a copy of the events head/tail pointers,
 * which we use to restore those values during subtransaction abort.
 *
 * query_depth is the subtransaction-start-time value of query_depth,
 * which we similarly use to clean up at subtransaction abort.
 *
 * firing_counter is the subtransaction-start-time value of firing_counter.
 * We use this to recognize which deferred triggers were fired (or marked
 * for firing) within an aborted subtransaction.
 *
 * We use GetCurrentTransactionNestLevel() to determine the correct array
 * index in trans_stack.  maxtransdepth is the number of allocated entries in
 * trans_stack.  (By not keeping our own stack pointer, we can avoid trouble
 * in cases where errors during subxact abort cause multiple invocations
 * of AfterTriggerEndSubXact() at the same nesting depth.)
 *
 * We create an AfterTriggersTableData struct for each target table of the
 * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
 * either transition tables or statement-level triggers.  This is used to
 * hold the relevant transition tables, as well as info tracking whether
 * we already queued the statement triggers.  (We use that info to prevent
 * firing the same statement triggers more than once per statement, or really
 * once per transition table set.)  These structs, along with the transition
 * table tuplestores, live in the (sub)transaction's CurTransactionContext.
 * That's sufficient lifespan because we don't allow transition tables to be
 * used by deferrable triggers, so they only need to survive until
 * AfterTriggerEndQuery.
 */
typedef struct AfterTriggersQueryData AfterTriggersQueryData;
typedef struct AfterTriggersTransData AfterTriggersTransData;
typedef struct AfterTriggersTableData AfterTriggersTableData;
typedef struct AfterTriggersData
{
    CommandId   firing_counter; /* next firing ID to assign */
    SetConstraintState state;   /* the active S C state */
    AfterTriggerEventList events;   /* deferred-event list */
    MemoryContext event_cxt;    /* memory context for events, if any */

    /* per-query-level data: */
    AfterTriggersQueryData *query_stack;    /* array of structs shown below */
    int         query_depth;    /* current index in above array */
    int         maxquerydepth;  /* allocated len of above array */

    /* per-subtransaction-level data: */
    AfterTriggersTransData *trans_stack;    /* array of structs shown below */
    int         maxtransdepth;  /* allocated len of above array */
} AfterTriggersData;

struct AfterTriggersQueryData
{
    AfterTriggerEventList events;   /* events pending from this query */
    Tuplestorestate *fdw_tuplestore;    /* foreign tuples for said events */
    List       *tables;         /* list of AfterTriggersTableData, see below */
};

struct AfterTriggersTransData
{
    /* these fields are just for resetting at subtrans abort: */
    SetConstraintState state;   /* saved S C state, or NULL if not yet saved */
    AfterTriggerEventList events;   /* saved list pointer */
    int         query_depth;    /* saved query_depth */
    CommandId   firing_counter; /* saved firing_counter */
};

struct AfterTriggersTableData
{
    /* relid + cmdType form the lookup key for these structs: */
    Oid         relid;          /* target table's OID */
    CmdType     cmdType;        /* event type, CMD_INSERT/UPDATE/DELETE */
    bool        closed;         /* true when no longer OK to add tuples */
    bool        before_trig_done;   /* did we already queue BS triggers? */
    bool        after_trig_done;    /* did we already queue AS triggers? */
    AfterTriggerEventList after_trig_events;    /* if so, saved list pointer */

    /*
     * We maintain separate transition tables for UPDATE/INSERT/DELETE since
     * MERGE can run all three actions in a single statement.  Note that
     * UPDATE needs both old and new transition tables whereas INSERT needs
     * only new, and DELETE needs only old.
     */

    /* "old" transition table for UPDATE, if any */
    Tuplestorestate *old_upd_tuplestore;
    /* "new" transition table for UPDATE, if any */
    Tuplestorestate *new_upd_tuplestore;
    /* "old" transition table for DELETE, if any */
    Tuplestorestate *old_del_tuplestore;
    /* "new" transition table for INSERT, if any */
    Tuplestorestate *new_ins_tuplestore;

    TupleTableSlot *storeslot;  /* for converting to tuplestore's format */
};
static AfterTriggersData afterTriggers;

static void AfterTriggerExecute(EState *estate,
                                AfterTriggerEvent event,
                                ResultRelInfo *relInfo,
                                ResultRelInfo *src_relInfo,
                                ResultRelInfo *dst_relInfo,
                                TriggerDesc *trigdesc,
                                FmgrInfo *finfo,
                                Instrumentation *instr,
                                MemoryContext per_tuple_context,
                                TupleTableSlot *trig_tuple_slot1,
                                TupleTableSlot *trig_tuple_slot2);
static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
                                                         CmdType cmdType);
static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
                                                 TupleDesc tupdesc);
static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
                                                        TupleTableSlot *oldslot,
                                                        TupleTableSlot *newslot,
                                                        TransitionCaptureState *transition_capture);
static void TransitionTableAddTuple(EState *estate,
                                    TransitionCaptureState *transition_capture,
                                    ResultRelInfo *relinfo,
                                    TupleTableSlot *slot,
                                    TupleTableSlot *original_insert_tuple,
                                    Tuplestorestate *tuplestore);
static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
                                                    Oid tgoid, bool tgisdeferred);
static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
 * Get the FDW tuplestore for the current trigger query level, creating it
 * if necessary.
 */
static Tuplestorestate *
GetCurrentFDWTuplestore(void)
{
    Tuplestorestate *ret;

    ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
    if (ret == NULL)
    {
        MemoryContext oldcxt;
        ResourceOwner saveResourceOwner;

        /*
         * Make the tuplestore valid until end of subtransaction.  We really
         * only need it until AfterTriggerEndQuery().
         */
        oldcxt = MemoryContextSwitchTo(CurTransactionContext);
        saveResourceOwner = CurrentResourceOwner;
        CurrentResourceOwner = CurTransactionResourceOwner;

        ret = tuplestore_begin_heap(false, false, work_mem);

        CurrentResourceOwner = saveResourceOwner;
        MemoryContextSwitchTo(oldcxt);

        afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
    }

    return ret;
}
/*
 * afterTriggerCheckState()
 *
 *  Returns true if the trigger event is actually in state DEFERRED.
 */
static bool
afterTriggerCheckState(AfterTriggerShared evtshared)
{
    Oid         tgoid = evtshared->ats_tgoid;
    SetConstraintState state = afterTriggers.state;
    int         i;

    /*
     * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
     * constraints declared NOT DEFERRABLE), the state is always false.
     */
    if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
        return false;

    /*
     * If constraint state exists, SET CONSTRAINTS might have been executed
     * either for this trigger or for all triggers.
     */
    if (state != NULL)
    {
        /* Check for SET CONSTRAINTS for this specific trigger. */
        for (i = 0; i < state->numstates; i++)
        {
            if (state->trigstates[i].sct_tgoid == tgoid)
                return state->trigstates[i].sct_tgisdeferred;
        }

        /* Check for SET CONSTRAINTS ALL. */
        if (state->all_isset)
            return state->all_isdeferred;
    }

    /*
     * Otherwise return the default state for the trigger.
     */
    return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
}
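
/*
 * For example (hypothetical constraint name), given a constraint declared
 * DEFERRABLE INITIALLY IMMEDIATE, afterTriggerCheckState() returns false by
 * default, but after
 *
 *      SET CONSTRAINTS my_fk DEFERRED;
 *
 * the per-trigger entry added to afterTriggers.state makes it return true,
 * so that constraint's events are queued instead of firing at the end of
 * each statement.
 */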
/*
 * afterTriggerAddEvent()
 *
 *  Add a new trigger event to the specified queue.
 *  The passed-in event data is copied.
 */
static void
afterTriggerAddEvent(AfterTriggerEventList *events,
                     AfterTriggerEvent event, AfterTriggerShared evtshared)
{
    Size        eventsize = SizeofTriggerEvent(event);
    Size        needed = eventsize + sizeof(AfterTriggerSharedData);
    AfterTriggerEventChunk *chunk;
    AfterTriggerShared newshared;
    AfterTriggerEvent newevent;

    /*
     * If empty list or not enough room in the tail chunk, make a new chunk.
     * We assume here that a new shared record will always be needed.
     */
    chunk = events->tail;
    if (chunk == NULL ||
        chunk->endfree - chunk->freeptr < needed)
    {
        Size        chunksize;

        /* Create event context if we didn't already */
        if (afterTriggers.event_cxt == NULL)
            afterTriggers.event_cxt =
                AllocSetContextCreate(TopTransactionContext,
                                      "AfterTriggerEvents",
                                      ALLOCSET_DEFAULT_SIZES);

        /*
         * Chunk size starts at 1KB and is allowed to increase up to 1MB.
         * These numbers are fairly arbitrary, though there is a hard limit at
         * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
         * shared records using the available space in ate_flags.  Another
         * constraint is that if the chunk size gets too huge, the search loop
         * below would get slow given a (not too common) usage pattern with
         * many distinct event types in a chunk.  Therefore, we double the
         * preceding chunk size only if there weren't too many shared records
         * in the preceding chunk; otherwise we halve it.  This gives us some
         * ability to adapt to the actual usage pattern of the current query
         * while still having large chunk sizes in typical usage.  All chunk
         * sizes used should be MAXALIGN multiples, to ensure that the shared
         * records will be aligned safely.
         */
#define MIN_CHUNK_SIZE 1024
#define MAX_CHUNK_SIZE (1024*1024)

#if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
#error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
#endif

        if (chunk == NULL)
            chunksize = MIN_CHUNK_SIZE;
        else
        {
            /* preceding chunk size... */
            chunksize = chunk->endptr - (char *) chunk;
            /* check number of shared records in preceding chunk */
            if ((chunk->endptr - chunk->endfree) <=
                (100 * sizeof(AfterTriggerSharedData)))
                chunksize *= 2;     /* okay, double it */
            else
                chunksize /= 2;     /* too many shared records */
            chunksize = Min(chunksize, MAX_CHUNK_SIZE);
        }
        chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
        chunk->next = NULL;
        chunk->freeptr = CHUNK_DATA_START(chunk);
        chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
        Assert(chunk->endfree - chunk->freeptr >= needed);

        if (events->head == NULL)
            events->head = chunk;
        else
            events->tail->next = chunk;
        events->tail = chunk;
        /* events->tailfree is now out of sync, but we'll fix it below */
    }

    /*
     * Try to locate a matching shared-data record already in the chunk.  If
     * none, make a new one.
     */
    for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
         (char *) newshared >= chunk->endfree;
         newshared--)
    {
        if (newshared->ats_tgoid == evtshared->ats_tgoid &&
            newshared->ats_relid == evtshared->ats_relid &&
            newshared->ats_event == evtshared->ats_event &&
            newshared->ats_table == evtshared->ats_table &&
            newshared->ats_firing_id == 0)
            break;
    }
    if ((char *) newshared < chunk->endfree)
    {
        *newshared = *evtshared;
        newshared->ats_firing_id = 0;   /* just to be sure */
        chunk->endfree = (char *) newshared;
    }

    /* Insert the data */
    newevent = (AfterTriggerEvent) chunk->freeptr;
    memcpy(newevent, event, eventsize);
    /* ... and link the new event to its shared record */
    newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
    newevent->ate_flags |= (char *) newshared - (char *) newevent;

    chunk->freeptr += eventsize;
    events->tailfree = chunk->freeptr;
}
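
/*
 * A sketch of the chunk-size progression described above, assuming few
 * shared records per chunk: 1KB, 2KB, 4KB, ... up to the 1MB cap, i.e.
 * roughly doubling on each new chunk, so a large event queue needs only a
 * logarithmic number of chunk allocations once it grows past the first
 * kilobyte.
 */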
/*
 * afterTriggerFreeEventList()
 *
 *  Free all the event storage in the given list.
 */
static void
afterTriggerFreeEventList(AfterTriggerEventList *events)
{
    AfterTriggerEventChunk *chunk;

    while ((chunk = events->head) != NULL)
    {
        events->head = chunk->next;
        pfree(chunk);
    }
    events->tail = NULL;
    events->tailfree = NULL;
}

/*
 * afterTriggerRestoreEventList()
 *
 *  Restore an event list to its prior length, removing all the events
 *  added since it had the value old_events.
 */
static void
afterTriggerRestoreEventList(AfterTriggerEventList *events,
                             const AfterTriggerEventList *old_events)
{
    AfterTriggerEventChunk *chunk;
    AfterTriggerEventChunk *next_chunk;

    if (old_events->tail == NULL)
    {
        /* restoring to a completely empty state, so free everything */
        afterTriggerFreeEventList(events);
    }
    else
    {
        *events = *old_events;
        /* free any chunks after the last one we want to keep */
        for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
        {
            next_chunk = chunk->next;
            pfree(chunk);
        }
        /* and clean up the tail chunk to be the right length */
        events->tail->next = NULL;
        events->tail->freeptr = events->tailfree;

        /*
         * We don't make any effort to remove now-unused shared data records.
         * They might still be useful, anyway.
         */
    }
}
/*
 * afterTriggerDeleteHeadEventChunk()
 *
 *  Remove the first chunk of events from the query level's event list.
 *  Keep any event list pointers elsewhere in the query level's data
 *  structures in sync.
 */
static void
afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
{
    AfterTriggerEventChunk *target = qs->events.head;
    ListCell   *lc;

    Assert(target && target->next);

    /*
     * First, update any pointers in the per-table data, so that they won't be
     * dangling.  Resetting obsoleted pointers to NULL will make
     * cancel_prior_stmt_triggers start from the list head, which is fine.
     */
    foreach(lc, qs->tables)
    {
        AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);

        if (table->after_trig_done &&
            table->after_trig_events.tail == target)
        {
            table->after_trig_events.head = NULL;
            table->after_trig_events.tail = NULL;
            table->after_trig_events.tailfree = NULL;
        }
    }

    /* Now we can flush the head chunk */
    qs->events.head = target->next;
    pfree(target);
}
/*
 * AfterTriggerExecute()
 *
 *  Fetch the required tuples back from the heap and fire one
 *  single trigger function.
 *
 *  Frequently, this will be fired many times in a row for triggers of
 *  a single relation.  Therefore, we cache the open relation and provide
 *  fmgr lookup cache space at the caller level.  (For triggers fired at
 *  the end of a query, we can even piggyback on the executor's state.)
 *
 *  When fired for a cross-partition update of a partitioned table, the old
 *  tuple is fetched using 'src_relInfo' (the source leaf partition) and
 *  the new tuple using 'dst_relInfo' (the destination leaf partition), though
 *  both are converted into the root partitioned table's format before passing
 *  to the trigger function.
 *
 *  event: event currently being fired.
 *  relInfo: result relation for event.
 *  src_relInfo: source partition of a cross-partition update
 *  dst_relInfo: its destination partition
 *  trigdesc: working copy of rel's trigger info.
 *  finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
 *  instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
 *      or NULL if no instrumentation is wanted.
 *  per_tuple_context: memory context to call trigger function in.
 *  trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
 *  trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
 */
static void
AfterTriggerExecute(EState *estate,
                    AfterTriggerEvent event,
                    ResultRelInfo *relInfo,
                    ResultRelInfo *src_relInfo,
                    ResultRelInfo *dst_relInfo,
                    TriggerDesc *trigdesc,
                    FmgrInfo *finfo, Instrumentation *instr,
                    MemoryContext per_tuple_context,
                    TupleTableSlot *trig_tuple_slot1,
                    TupleTableSlot *trig_tuple_slot2)
{
    Relation    rel = relInfo->ri_RelationDesc;
    Relation    src_rel = src_relInfo->ri_RelationDesc;
    Relation    dst_rel = dst_relInfo->ri_RelationDesc;
    AfterTriggerShared evtshared = GetTriggerSharedData(event);
    Oid         tgoid = evtshared->ats_tgoid;
    TriggerData LocTriggerData = {0};
    HeapTuple   rettuple;
    int         tgindx;
    bool        should_free_trig = false;
    bool        should_free_new = false;

    /*
     * Locate trigger in trigdesc.
     */
    for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
    {
        if (trigdesc->triggers[tgindx].tgoid == tgoid)
        {
            LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
            break;
        }
    }
    if (LocTriggerData.tg_trigger == NULL)
        elog(ERROR, "could not find trigger %u", tgoid);

    /*
     * If doing EXPLAIN ANALYZE, start charging time to this trigger.  We want
     * to include time spent re-fetching tuples in the trigger cost.
     */
    if (instr)
        InstrStartNode(instr + tgindx);

    /*
     * Fetch the required tuple(s).
     */
    switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
    {
        case AFTER_TRIGGER_FDW_FETCH:
            {
                Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();

                if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
                                             trig_tuple_slot1))
                    elog(ERROR, "failed to fetch tuple1 for AFTER trigger");

                if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
                    TRIGGER_EVENT_UPDATE &&
                    !tuplestore_gettupleslot(fdw_tuplestore, true, false,
                                             trig_tuple_slot2))
                    elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
            }
            /* fall through */
        case AFTER_TRIGGER_FDW_REUSE:

            /*
             * Store tuple in the slot so that tg_trigtuple does not reference
             * tuplestore memory.  (It is formally possible for the trigger
             * function to queue trigger events that add to the same
             * tuplestore, which can push other tuples out of memory.)  The
             * distinction is academic, because we start with a minimal tuple
             * that is stored as a heap tuple, constructed in different memory
             * context, in the slot anyway.
             */
            LocTriggerData.tg_trigslot = trig_tuple_slot1;
            LocTriggerData.tg_trigtuple =
                ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);

            if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
                TRIGGER_EVENT_UPDATE)
            {
                LocTriggerData.tg_newslot = trig_tuple_slot2;
                LocTriggerData.tg_newtuple =
                    ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
            }
            else
            {
                LocTriggerData.tg_newtuple = NULL;
            }
            break;

        default:
            if (ItemPointerIsValid(&(event->ate_ctid1)))
            {
                TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
                                                                 src_relInfo);

                if (!table_tuple_fetch_row_version(src_rel,
                                                   &(event->ate_ctid1),
                                                   SnapshotAny,
                                                   src_slot))
                    elog(ERROR, "failed to fetch tuple1 for AFTER trigger");

                /*
                 * Store the tuple fetched from the source partition into the
                 * target (root partitioned) table slot, converting if needed.
                 */
                if (src_relInfo != relInfo)
                {
                    TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);

                    LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
                    if (map)
                        execute_attr_map_slot(map->attrMap,
                                              src_slot,
                                              LocTriggerData.tg_trigslot);
                    else
                        ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
                }
                else
                    LocTriggerData.tg_trigslot = src_slot;
                LocTriggerData.tg_trigtuple =
                    ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
            }
            else
            {
                LocTriggerData.tg_trigtuple = NULL;
            }

            /* don't touch ctid2 if not there */
            if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ||
                 (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
                ItemPointerIsValid(&(event->ate_ctid2)))
            {
                TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
                                                                 dst_relInfo);

                if (!table_tuple_fetch_row_version(dst_rel,
                                                   &(event->ate_ctid2),
                                                   SnapshotAny,
                                                   dst_slot))
                    elog(ERROR, "failed to fetch tuple2 for AFTER trigger");

                /*
                 * Store the tuple fetched from the destination partition into
                 * the target (root partitioned) table slot, converting if
                 * needed.
                 */
                if (dst_relInfo != relInfo)
                {
                    TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);

                    LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
                    if (map)
                        execute_attr_map_slot(map->attrMap,
                                              dst_slot,
                                              LocTriggerData.tg_newslot);
                    else
                        ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
                }
                else
                    LocTriggerData.tg_newslot = dst_slot;
                LocTriggerData.tg_newtuple =
                    ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
            }
            else
            {
                LocTriggerData.tg_newtuple = NULL;
            }
    }

    /*
     * Set up the tuplestore information to let the trigger have access to
     * transition tables.  When we first make a transition table available to
     * a trigger, mark it "closed" so that it cannot change anymore.  If any
     * additional events of the same type get queued in the current trigger
     * query level, they'll go into new transition tables.
     */
    LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
    if (evtshared->ats_table)
    {
        if (LocTriggerData.tg_trigger->tgoldtable)
        {
            if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
                LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
            else
                LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
            evtshared->ats_table->closed = true;
        }

        if (LocTriggerData.tg_trigger->tgnewtable)
        {
            if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
                LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
            else
                LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
            evtshared->ats_table->closed = true;
        }
    }

    /*
     * Setup the remaining trigger information
     */
    LocTriggerData.type = T_TriggerData;
    LocTriggerData.tg_event =
        evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
    LocTriggerData.tg_relation = rel;
    if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
        LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;

    MemoryContextReset(per_tuple_context);

    /*
     * Call the trigger and throw away any possibly returned updated tuple.
     * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
     */
    rettuple = ExecCallTriggerFunc(&LocTriggerData,
                                   tgindx,
                                   finfo,
                                   NULL,
                                   per_tuple_context);
    if (rettuple != NULL &&
        rettuple != LocTriggerData.tg_trigtuple &&
        rettuple != LocTriggerData.tg_newtuple)
        heap_freetuple(rettuple);

    /*
     * Release resources
     */
    if (should_free_trig)
        heap_freetuple(LocTriggerData.tg_trigtuple);
    if (should_free_new)
        heap_freetuple(LocTriggerData.tg_newtuple);

    /* don't clear slots' contents if foreign table */
    if (trig_tuple_slot1 == NULL)
    {
        if (LocTriggerData.tg_trigslot)
            ExecClearTuple(LocTriggerData.tg_trigslot);
        if (LocTriggerData.tg_newslot)
            ExecClearTuple(LocTriggerData.tg_newslot);
    }

    /*
     * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
     * one "tuple returned" (really the number of firings).
     */
    if (instr)
        InstrStopNode(instr + tgindx, 1);
}
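
/*
 * Cross-partition example (hypothetical schema): with a partitioned table
 *
 *      CREATE TABLE p (id int, part int) PARTITION BY LIST (part);
 *
 * an UPDATE that changes the partition key (say, UPDATE p SET part = 2
 * WHERE part = 1) is carried out as a DELETE from the source partition plus
 * an INSERT into the destination.  The AFTER UPDATE row event queued for it
 * is an AFTER_TRIGGER_CP_UPDATE event: ate_ctid1 points into the source leaf
 * and ate_ctid2 into the destination leaf, and both tuples are converted to
 * the root table's row type above before the trigger function sees them.
 */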
/*
 * afterTriggerMarkEvents()
 *
 *  Scan the given event list for not yet invoked events.  Mark the ones
 *  that can be invoked now with the current firing ID.
 *
 *  If move_list isn't NULL, events that are not to be invoked now are
 *  transferred to move_list.
 *
 *  When immediate_only is true, do not invoke currently-deferred triggers.
 *  (This will be false only at main transaction exit.)
 *
 *  Returns true if any invokable events were found.
 */
static bool
afterTriggerMarkEvents(AfterTriggerEventList *events,
                       AfterTriggerEventList *move_list,
                       bool immediate_only)
{
    bool        found = false;
    bool        deferred_found = false;
    AfterTriggerEvent event;
    AfterTriggerEventChunk *chunk;

    for_each_event_chunk(event, chunk, *events)
    {
        AfterTriggerShared evtshared = GetTriggerSharedData(event);
        bool        defer_it = false;

        if (!(event->ate_flags &
              (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
        {
            /*
             * This trigger hasn't been called or scheduled yet. Check if we
             * should call it now.
             */
            if (immediate_only && afterTriggerCheckState(evtshared))
            {
                defer_it = true;
            }
            else
            {
                /*
                 * Mark it as to be fired in this firing cycle.
                 */
                evtshared->ats_firing_id = afterTriggers.firing_counter;
                event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
                found = true;
            }
        }

        /*
         * If it's deferred, move it to move_list, if requested.
         */
        if (defer_it && move_list != NULL)
        {
            deferred_found = true;
            /* add it to move_list */
            afterTriggerAddEvent(move_list, event, evtshared);
            /* mark original copy "done" so we don't do it again */
            event->ate_flags |= AFTER_TRIGGER_DONE;
        }
    }

    /*
     * We could allow deferred triggers if, before the end of the
     * security-restricted operation, we were to verify that a SET CONSTRAINTS
     * ... IMMEDIATE has fired all such triggers.  For now, don't bother.
     */
    if (deferred_found && InSecurityRestrictedOperation())
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 errmsg("cannot fire deferred trigger within security-restricted operation")));

    return found;
}
/*
 * afterTriggerInvokeEvents()
 *
 *  Scan the given event list for events that are marked as to be fired
 *  in the current firing cycle, and fire them.
 *
 *  If estate isn't NULL, we use its result relation info to avoid repeated
 *  openings and closing of trigger target relations.  If it is NULL, we
 *  make one locally to cache the info in case there are multiple trigger
 *  events per rel.
 *
 *  When delete_ok is true, it's safe to delete fully-processed events.
 *  (We are not very tense about that: we simply reset a chunk to be empty
 *  if all its events got fired.  The objective here is just to avoid useless
 *  rescanning of events when a trigger queues new events during transaction
 *  end, so it's not necessary to worry much about the case where only
 *  some events are fired.)
 *
 *  Returns true if no unfired events remain in the list (this allows us
 *  to avoid repeating afterTriggerMarkEvents).
 */
static bool
afterTriggerInvokeEvents(AfterTriggerEventList *events,
                         CommandId firing_id,
                         EState *estate, bool delete_ok)
{
    bool        all_fired = true;
    AfterTriggerEventChunk *chunk;
    MemoryContext per_tuple_context;
    bool        local_estate = false;
    ResultRelInfo *rInfo = NULL;
    Relation    rel = NULL;
    TriggerDesc *trigdesc = NULL;
    FmgrInfo   *finfo = NULL;
    Instrumentation *instr = NULL;
    TupleTableSlot *slot1 = NULL,
               *slot2 = NULL;

    /* Make a local EState if need be */
    if (estate == NULL)
    {
        estate = CreateExecutorState();
        local_estate = true;
    }

    /* Make a per-tuple memory context for trigger function calls */
    per_tuple_context =
        AllocSetContextCreate(CurrentMemoryContext,
                              "AfterTriggerTupleContext",
                              ALLOCSET_DEFAULT_SIZES);

    for_each_chunk(chunk, *events)
    {
        AfterTriggerEvent event;
        bool        all_fired_in_chunk = true;

        for_each_event(event, chunk)
        {
            AfterTriggerShared evtshared = GetTriggerSharedData(event);

            /*
             * Is it one for me to fire?
             */
            if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
                evtshared->ats_firing_id == firing_id)
            {
                ResultRelInfo *src_rInfo,
                           *dst_rInfo;

                /*
                 * So let's fire it... but first, find the correct relation if
                 * this is not the same relation as before.
                 */
                if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
                {
                    rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
                                                    NULL);
                    rel = rInfo->ri_RelationDesc;
                    /* Catch calls with insufficient relcache refcounting */
                    Assert(!RelationHasReferenceCountZero(rel));
                    trigdesc = rInfo->ri_TrigDesc;
                    finfo = rInfo->ri_TrigFunctions;
                    instr = rInfo->ri_TrigInstrument;
                    if (slot1 != NULL)
                    {
                        ExecDropSingleTupleTableSlot(slot1);
                        ExecDropSingleTupleTableSlot(slot2);
                        slot1 = slot2 = NULL;
                    }
                    if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
                    {
                        slot1 = MakeSingleTupleTableSlot(rel->rd_att,
                                                         &TTSOpsMinimalTuple);
                        slot2 = MakeSingleTupleTableSlot(rel->rd_att,
                                                         &TTSOpsMinimalTuple);
                    }
                    if (trigdesc == NULL)   /* should not happen */
                        elog(ERROR, "relation %u has no triggers",
                             evtshared->ats_relid);
                }

                /*
                 * Look up source and destination partition result rels of a
                 * cross-partition update event.
                 */
                if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
                    AFTER_TRIGGER_CP_UPDATE)
                {
                    Assert(OidIsValid(event->ate_src_part) &&
                           OidIsValid(event->ate_dst_part));
                    src_rInfo = ExecGetTriggerResultRel(estate,
                                                        event->ate_src_part,
                                                        rInfo);
                    dst_rInfo = ExecGetTriggerResultRel(estate,
                                                        event->ate_dst_part,
                                                        rInfo);
                }
                else
                    src_rInfo = dst_rInfo = rInfo;

                /*
                 * Fire it.  Note that the AFTER_TRIGGER_IN_PROGRESS flag is
                 * still set, so recursive examinations of the event list
                 * won't try to re-fire it.
                 */
                AfterTriggerExecute(estate, event, rInfo,
                                    src_rInfo, dst_rInfo,
                                    trigdesc, finfo, instr,
                                    per_tuple_context, slot1, slot2);

                /*
                 * Mark the event as done.
                 */
                event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
                event->ate_flags |= AFTER_TRIGGER_DONE;
            }
            else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
            {
                /* something remains to be done */
                all_fired = all_fired_in_chunk = false;
            }
        }

        /* Clear the chunk if delete_ok and nothing left of interest */
        if (delete_ok && all_fired_in_chunk)
        {
            chunk->freeptr = CHUNK_DATA_START(chunk);
            chunk->endfree = chunk->endptr;

            /*
             * If it's last chunk, must sync event list's tailfree too.  Note
             * that delete_ok must NOT be passed as true if there could be
             * additional AfterTriggerEventList values pointing at this event
             * list, since we'd fail to fix their copies of tailfree.
             */
            if (chunk == events->tail)
                events->tailfree = chunk->freeptr;
        }
    }
    if (slot1 != NULL)
    {
        ExecDropSingleTupleTableSlot(slot1);
        ExecDropSingleTupleTableSlot(slot2);
    }

    /* Release working resources */
    MemoryContextDelete(per_tuple_context);

    if (local_estate)
    {
        ExecCloseResultRelations(estate);
        ExecResetTupleTable(estate->es_tupleTable, false);
        FreeExecutorState(estate);
    }

    return all_fired;
}
/*
 * GetAfterTriggersTableData
 *
 *  Find or create an AfterTriggersTableData struct for the specified
 *  trigger event (relation + operation type).  Ignore existing structs
 *  marked "closed"; we don't want to put any additional tuples into them,
 *  nor change their stmt-triggers-fired state.
 *
 *  Note: the AfterTriggersTableData list is allocated in the current
 *  (sub)transaction's CurTransactionContext.  This is OK because
 *  we don't need it to live past AfterTriggerEndQuery.
 */
static AfterTriggersTableData *
GetAfterTriggersTableData(Oid relid, CmdType cmdType)
{
    AfterTriggersTableData *table;
    AfterTriggersQueryData *qs;
    MemoryContext oldcxt;
    ListCell   *lc;

    /* Caller should have ensured query_depth is OK. */
    Assert(afterTriggers.query_depth >= 0 &&
           afterTriggers.query_depth < afterTriggers.maxquerydepth);
    qs = &afterTriggers.query_stack[afterTriggers.query_depth];

    foreach(lc, qs->tables)
    {
        table = (AfterTriggersTableData *) lfirst(lc);
        if (table->relid == relid && table->cmdType == cmdType &&
            !table->closed)
            return table;
    }

    oldcxt = MemoryContextSwitchTo(CurTransactionContext);

    table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
    table->relid = relid;
    table->cmdType = cmdType;
    qs->tables = lappend(qs->tables, table);

    MemoryContextSwitchTo(oldcxt);

    return table;
}

/*
 * Returns a TupleTableSlot suitable for holding the tuples to be put
 * into AfterTriggersTableData's transition table tuplestores.
 */
static TupleTableSlot *
GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
                          TupleDesc tupdesc)
{
    /* Create it if not already done. */
    if (!table->storeslot)
    {
        MemoryContext oldcxt;

        /*
         * We only need this slot until AfterTriggerEndQuery, but making it
         * last till end-of-subxact is good enough.  It'll be freed by
         * AfterTriggerFreeQuery().
         */
        oldcxt = MemoryContextSwitchTo(CurTransactionContext);
        table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
        MemoryContextSwitchTo(oldcxt);
    }

    return table->storeslot;
}
/*
 * MakeTransitionCaptureState
 *
 *  Make a TransitionCaptureState object for the given TriggerDesc, target
 *  relation, and operation type.  The TCS object holds all the state needed
 *  to decide whether to capture tuples in transition tables.
 *
 *  If there are no triggers in 'trigdesc' that request relevant transition
 *  tables, then return NULL.
 *
 *  The resulting object can be passed to the ExecAR* functions.  When
 *  dealing with child tables, the caller can set tcs_original_insert_tuple
 *  to avoid having to reconstruct the original tuple in the root table's
 *  format.
 *
 *  Note that we copy the flags from a parent table into this struct (rather
 *  than subsequently using the relation's TriggerDesc directly) so that we
 *  can use it to control collection of transition tuples from child tables.
 *
 *  Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
 *  on the same table during one query should share one transition table.
 *  Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
 *  looked up using the table OID + CmdType, and are merely referenced by
 *  the TransitionCaptureState objects we hand out to callers.
 */
TransitionCaptureState *
MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
{
    TransitionCaptureState *state;
    bool        need_old_upd,
                need_new_upd,
                need_old_del,
                need_new_ins;
    AfterTriggersTableData *table;
    MemoryContext oldcxt;
    ResourceOwner saveResourceOwner;

    if (trigdesc == NULL)
        return NULL;

    /* Detect which table(s) we need. */
    switch (cmdType)
    {
        case CMD_INSERT:
            need_old_upd = need_old_del = need_new_upd = false;
            need_new_ins = trigdesc->trig_insert_new_table;
            break;
        case CMD_UPDATE:
            need_old_upd = trigdesc->trig_update_old_table;
            need_new_upd = trigdesc->trig_update_new_table;
            need_old_del = need_new_ins = false;
            break;
        case CMD_DELETE:
            need_old_del = trigdesc->trig_delete_old_table;
            need_old_upd = need_new_upd = need_new_ins = false;
            break;
        case CMD_MERGE:
            need_old_upd = trigdesc->trig_update_old_table;
            need_new_upd = trigdesc->trig_update_new_table;
            need_old_del = trigdesc->trig_delete_old_table;
            need_new_ins = trigdesc->trig_insert_new_table;
            break;
        default:
            elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
            /* keep compiler quiet */
            need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
            break;
    }
    if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
        return NULL;

    /* Check state, like AfterTriggerSaveEvent. */
    if (afterTriggers.query_depth < 0)
        elog(ERROR, "MakeTransitionCaptureState() called outside of query");

    /* Be sure we have enough space to record events at this query depth. */
    if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
        AfterTriggerEnlargeQueryState();

    /*
     * Find or create an AfterTriggersTableData struct to hold the
     * tuplestore(s).  If there's a matching struct but it's marked closed,
     * ignore it; we need a newer one.
     *
     * Note: the AfterTriggersTableData list, as well as the tuplestores, are
     * allocated in the current (sub)transaction's CurTransactionContext, and
     * the tuplestores are managed by the (sub)transaction's resource owner.
     * This is sufficient lifespan because we do not allow triggers using
     * transition tables to be deferrable; they will be fired during
     * AfterTriggerEndQuery, after which it's okay to delete the data.
     */
    table = GetAfterTriggersTableData(relid, cmdType);

    /* Now create required tuplestore(s), if we don't have them already. */
    oldcxt = MemoryContextSwitchTo(CurTransactionContext);
    saveResourceOwner = CurrentResourceOwner;
    CurrentResourceOwner = CurTransactionResourceOwner;

    if (need_old_upd && table->old_upd_tuplestore == NULL)
        table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
    if (need_new_upd && table->new_upd_tuplestore == NULL)
        table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
    if (need_old_del && table->old_del_tuplestore == NULL)
        table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
    if (need_new_ins && table->new_ins_tuplestore == NULL)
        table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);

    CurrentResourceOwner = saveResourceOwner;
    MemoryContextSwitchTo(oldcxt);

    /* Now build the TransitionCaptureState struct, in caller's context */
    state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
    state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
    state->tcs_update_old_table = trigdesc->trig_update_old_table;
    state->tcs_update_new_table = trigdesc->trig_update_new_table;
    state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
    state->tcs_private = table;

    return state;
}
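
/*
 * Example of why per-operation tuplestores matter (hypothetical tables):
 *
 *      MERGE INTO tgt t USING src s ON t.id = s.id
 *          WHEN MATCHED THEN UPDATE SET val = s.val
 *          WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val);
 *
 * A single MERGE can perform INSERTs, UPDATEs and DELETEs, so the
 * AfterTriggersTableData looked up above may end up with several of its
 * tuplestores populated during one statement, while each trigger still sees
 * only the transition table(s) for its own event type.
 */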
/*
 * AfterTriggerBeginXact()
 *
 *  Called at transaction start (either BEGIN or implicit for single
 *  statement outside of transaction block).
 */
void
AfterTriggerBeginXact(void)
{
    /*
     * Initialize after-trigger state structure to empty
     */
    afterTriggers.firing_counter = (CommandId) 1;   /* mustn't be 0 */
    afterTriggers.query_depth = -1;

    /*
     * Verify that there is no leftover state remaining.  If these assertions
     * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
     * up properly.
     */
    Assert(afterTriggers.state == NULL);
    Assert(afterTriggers.query_stack == NULL);
    Assert(afterTriggers.maxquerydepth == 0);
    Assert(afterTriggers.event_cxt == NULL);
    Assert(afterTriggers.events.head == NULL);
    Assert(afterTriggers.trans_stack == NULL);
    Assert(afterTriggers.maxtransdepth == 0);
}

/*
 * AfterTriggerBeginQuery()
 *
 *  Called just before we start processing a single query within a
 *  transaction (or subtransaction).  Most of the real work gets deferred
 *  until somebody actually tries to queue a trigger event.
 */
void
AfterTriggerBeginQuery(void)
{
    /* Increase the query stack depth */
    afterTriggers.query_depth++;
}
/*
 * AfterTriggerEndQuery()
 *
 *  Called after one query has been completely processed.  At this time
 *  we invoke all AFTER IMMEDIATE trigger events queued by the query, and
 *  transfer deferred trigger events to the global deferred-trigger list.
 *
 *  Note that this must be called BEFORE closing down the executor
 *  with ExecutorEnd, because we make use of the EState's info about
 *  target relations.  Normally it is called from ExecutorFinish.
 */
void
AfterTriggerEndQuery(EState *estate)
{
    AfterTriggersQueryData *qs;

    /* Must be inside a query, too */
    Assert(afterTriggers.query_depth >= 0);

    /*
     * If we never even got as far as initializing the event stack, there
     * certainly won't be any events, so exit quickly.
     */
    if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
    {
        afterTriggers.query_depth--;
        return;
    }

    /*
     * Process all immediate-mode triggers queued by the query, and move the
     * deferred ones to the main list of deferred events.
     *
     * Notice that we decide which ones will be fired, and put the deferred
     * ones on the main list, before anything is actually fired.  This ensures
     * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
     * IMMEDIATE: all events we have decided to defer will be available for it
     * to fire.
     *
     * We loop in case a trigger queues more events at the same query level.
     * Ordinary trigger functions, including all PL/pgSQL trigger functions,
     * will instead fire any triggers in a dedicated query level.  Foreign key
     * enforcement triggers do add to the current query level, thanks to their
     * passing fire_triggers = false to SPI_execute_snapshot().  Other
     * C-language triggers might do likewise.
     *
     * If we find no firable events, we don't have to increment
     * firing_counter.
     */
    qs = &afterTriggers.query_stack[afterTriggers.query_depth];

    for (;;)
    {
        if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
        {
            CommandId   firing_id = afterTriggers.firing_counter++;
            AfterTriggerEventChunk *oldtail = qs->events.tail;

            if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
                break;          /* all fired */

            /*
             * Firing a trigger could result in query_stack being repalloc'd,
             * so we must recalculate qs after each afterTriggerInvokeEvents
             * call.  Furthermore, it's unsafe to pass delete_ok = true here,
             * because that could cause afterTriggerInvokeEvents to try to
             * access qs->events after the stack has been repalloc'd.
             */
            qs = &afterTriggers.query_stack[afterTriggers.query_depth];

            /*
             * We'll need to scan the events list again.  To reduce the cost
             * of doing so, get rid of completely-fired chunks.  We know that
             * all events were marked IN_PROGRESS or DONE at the conclusion of
             * afterTriggerMarkEvents, so any still-interesting events must
             * have been added after that, and so must be in the chunk that
             * was then the tail chunk, or in later chunks.  So, zap all
             * chunks before oldtail.  This is approximately the same set of
             * events we would have gotten rid of by passing delete_ok = true.
             */
            Assert(oldtail != NULL);
            while (qs->events.head != oldtail)
                afterTriggerDeleteHeadEventChunk(qs);
        }
        else
            break;
    }

    /* Release query-level-local storage, including tuplestores if any */
    AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);

    afterTriggers.query_depth--;
}
/*
 * AfterTriggerFreeQuery
 *  Release subsidiary storage for a trigger query level.
 *  This includes closing down tuplestores.
 *  Note: it's important for this to be safe if interrupted by an error
 *  and then called again for the same query level.
 */
static void
AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
{
    Tuplestorestate *ts;
    List       *tables;
    ListCell   *lc;

    /* Drop the trigger events */
    afterTriggerFreeEventList(&qs->events);

    /* Drop FDW tuplestore if any */
    ts = qs->fdw_tuplestore;
    qs->fdw_tuplestore = NULL;
    if (ts)
        tuplestore_end(ts);

    /* Release per-table subsidiary storage */
    tables = qs->tables;
    foreach(lc, tables)
    {
        AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);

        ts = table->old_upd_tuplestore;
        table->old_upd_tuplestore = NULL;
        if (ts)
            tuplestore_end(ts);
        ts = table->new_upd_tuplestore;
        table->new_upd_tuplestore = NULL;
        if (ts)
            tuplestore_end(ts);
        ts = table->old_del_tuplestore;
        table->old_del_tuplestore = NULL;
        if (ts)
            tuplestore_end(ts);
        ts = table->new_ins_tuplestore;
        table->new_ins_tuplestore = NULL;
        if (ts)
            tuplestore_end(ts);
        if (table->storeslot)
            ExecDropSingleTupleTableSlot(table->storeslot);
    }

    /*
     * Now free the AfterTriggersTableData structs and list cells.  Reset list
     * pointer first; if list_free_deep somehow gets an error, better to leak
     * that storage than have an infinite loop.
     */
    qs->tables = NIL;
    list_free_deep(tables);
}
/*
 * AfterTriggerFireDeferred()
 *
 *  Called just before the current transaction is committed. At this
 *  time we invoke all pending DEFERRED triggers.
 *
 *  It is possible for other modules to queue additional deferred triggers
 *  during pre-commit processing; therefore xact.c may have to call this
 *  multiple times.
 */
void
AfterTriggerFireDeferred(void)
{
    AfterTriggerEventList *events;
    bool        snap_pushed = false;

    /* Must not be inside a query */
    Assert(afterTriggers.query_depth == -1);

    /*
     * If there are any triggers to fire, make sure we have set a snapshot for
     * them to use.  (Since PortalRunUtility doesn't set a snap for COMMIT, we
     * can't assume ActiveSnapshot is valid on entry.)
     */
    events = &afterTriggers.events;
    if (events->head != NULL)
    {
        PushActiveSnapshot(GetTransactionSnapshot());
        snap_pushed = true;
    }

    /*
     * Run all the remaining triggers.  Loop until they are all gone, in case
     * some trigger queues more for us to do.
     */
    while (afterTriggerMarkEvents(events, NULL, false))
    {
        CommandId   firing_id = afterTriggers.firing_counter++;

        if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
            break;              /* all fired */
    }

    /*
     * We don't bother freeing the event list, since it will go away anyway
     * (and more efficiently than via pfree) in AfterTriggerEndXact.
     */

    if (snap_pushed)
        PopActiveSnapshot();
}
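
/*
 * For example (hypothetical constraint), with a foreign key declared
 * DEFERRABLE INITIALLY DEFERRED, the RI check triggers queued by each
 * statement stay in afterTriggers.events until COMMIT; this function is
 * where they finally run, under a snapshot pushed just for that purpose.
 */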
/*
 * AfterTriggerEndXact()
 *
 *  The current transaction is finishing.
 *
 *  Any unfired triggers are canceled so we simply throw
 *  away anything we know.
 *
 *  Note: it is possible for this to be called repeatedly in case of
 *  error during transaction abort; therefore, do not complain if
 *  already closed down.
 */
void
AfterTriggerEndXact(bool isCommit)
{
    /*
     * Forget the pending-events list.
     *
     * Since all the info is in TopTransactionContext or children thereof, we
     * don't really need to do anything to reclaim memory.  However, the
     * pending-events list could be large, and so it's useful to discard it as
     * soon as possible --- especially if we are aborting because we ran out
     * of memory for the list!
     */
    if (afterTriggers.event_cxt)
    {
        MemoryContextDelete(afterTriggers.event_cxt);
        afterTriggers.event_cxt = NULL;
        afterTriggers.events.head = NULL;
        afterTriggers.events.tail = NULL;
        afterTriggers.events.tailfree = NULL;
    }

    /*
     * Forget any subtransaction state as well.  Since this can't be very
     * large, we let the eventual reset of TopTransactionContext free the
     * memory instead of doing it here.
     */
    afterTriggers.trans_stack = NULL;
    afterTriggers.maxtransdepth = 0;

    /*
     * Forget the query stack and constraint-related state information.  As
     * with the subtransaction state information, we don't bother freeing the
     * memory here either.
     */
    afterTriggers.query_stack = NULL;
    afterTriggers.maxquerydepth = 0;
    afterTriggers.state = NULL;

    /* No more afterTriggers manipulation until next transaction starts. */
    afterTriggers.query_depth = -1;
}
/*
 * AfterTriggerBeginSubXact()
 *
 *  Start a subtransaction.
 */
void
AfterTriggerBeginSubXact(void)
{
    int         my_level = GetCurrentTransactionNestLevel();

    /*
     * Allocate more space in the trans_stack if needed.  (Note: because the
     * minimum nest level of a subtransaction is 2, we waste the first couple
     * entries of the array; not worth the notational effort to avoid it.)
     */
    while (my_level >= afterTriggers.maxtransdepth)
    {
        if (afterTriggers.maxtransdepth == 0)
        {
            /* Arbitrarily initialize for max of 8 subtransaction levels */
            afterTriggers.trans_stack = (AfterTriggersTransData *)
                MemoryContextAlloc(TopTransactionContext,
                                   8 * sizeof(AfterTriggersTransData));
            afterTriggers.maxtransdepth = 8;
        }
        else
        {
            /* repalloc will keep the stack in the same context */
            int         new_alloc = afterTriggers.maxtransdepth * 2;

            afterTriggers.trans_stack = (AfterTriggersTransData *)
                repalloc(afterTriggers.trans_stack,
                         new_alloc * sizeof(AfterTriggersTransData));
            afterTriggers.maxtransdepth = new_alloc;
        }
    }

    /*
     * Push the current information into the stack.  The SET CONSTRAINTS state
     * is not saved until/unless changed.  Likewise, we don't make a
     * per-subtransaction event context until needed.
     */
    afterTriggers.trans_stack[my_level].state = NULL;
    afterTriggers.trans_stack[my_level].events = afterTriggers.events;
    afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
    afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
}
/*
 * AfterTriggerEndSubXact()
 *
 *  The current subtransaction is ending.
 */
void
AfterTriggerEndSubXact(bool isCommit)
{
    int         my_level = GetCurrentTransactionNestLevel();
    SetConstraintState state;
    AfterTriggerEvent event;
    AfterTriggerEventChunk *chunk;
    CommandId   subxact_firing_id;

    /*
     * Pop the prior state if needed.
     */
    if (isCommit)
    {
        Assert(my_level < afterTriggers.maxtransdepth);
        /* If we saved a prior state, we don't need it anymore */
        state = afterTriggers.trans_stack[my_level].state;
        if (state != NULL)
            pfree(state);
        /* this avoids double pfree if error later: */
        afterTriggers.trans_stack[my_level].state = NULL;
        Assert(afterTriggers.query_depth ==
               afterTriggers.trans_stack[my_level].query_depth);
    }
    else
    {
        /*
         * Aborting.  It is possible subxact start failed before calling
         * AfterTriggerBeginSubXact, in which case we mustn't risk touching
         * trans_stack levels that aren't there.
         */
        if (my_level >= afterTriggers.maxtransdepth)
            return;

        /*
         * Release query-level storage for queries being aborted, and restore
         * query_depth to its pre-subxact value.  This assumes that a
         * subtransaction will not add events to query levels started in a
         * earlier transaction state.
         */
        while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
        {
            if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
                AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
            afterTriggers.query_depth--;
        }
        Assert(afterTriggers.query_depth ==
               afterTriggers.trans_stack[my_level].query_depth);

        /*
         * Restore the global deferred-event list to its former length,
         * discarding any events queued by the subxact.
         */
        afterTriggerRestoreEventList(&afterTriggers.events,
                                     &afterTriggers.trans_stack[my_level].events);

        /*
         * Restore the trigger state.  If the saved state is NULL, then this
         * subxact didn't save it, so it doesn't need restoring.
         */
        state = afterTriggers.trans_stack[my_level].state;
        if (state != NULL)
        {
            pfree(afterTriggers.state);
            afterTriggers.state = state;
        }
        /* this avoids double pfree if error later: */
        afterTriggers.trans_stack[my_level].state = NULL;

        /*
         * Scan for any remaining deferred events that were marked DONE or IN
         * PROGRESS by this subxact or a child, and un-mark them.  We can
         * recognize such events because they have a firing ID greater than or
         * equal to the firing_counter value we saved at subtransaction start.
         * (This essentially assumes that the current subxact includes all
         * subxacts started after it.)
         */
        subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
        for_each_event_chunk(event, chunk, afterTriggers.events)
        {
            AfterTriggerShared evtshared = GetTriggerSharedData(event);

            if (event->ate_flags &
                (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
            {
                if (evtshared->ats_firing_id >= subxact_firing_id)
                    event->ate_flags &=
                        ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
            }
        }
    }
}
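
/*
 * For example, in
 *
 *      BEGIN;
 *      SAVEPOINT s1;
 *      -- statements that queue deferred trigger events
 *      ROLLBACK TO s1;
 *
 * the abort path above truncates the deferred-event list back to the length
 * saved by AfterTriggerBeginSubXact and restores any SET CONSTRAINTS state
 * the subtransaction had overridden.
 */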
 * Get the transition table for the given event and depending on whether we are
 * processing the old or the new tuple.
 */
static Tuplestorestate *
GetAfterTriggersTransitionTable(int event,
                                TupleTableSlot *oldslot,
                                TupleTableSlot *newslot,
                                TransitionCaptureState *transition_capture)
{
    Tuplestorestate *tuplestore = NULL;
    bool        delete_old_table = transition_capture->tcs_delete_old_table;
    bool        update_old_table = transition_capture->tcs_update_old_table;
    bool        update_new_table = transition_capture->tcs_update_new_table;
    bool        insert_new_table = transition_capture->tcs_insert_new_table;

    /*
     * For INSERT events NEW should be non-NULL, for DELETE events OLD should
     * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
     * non-NULL.  But for UPDATE events fired for capturing transition tuples
     * during UPDATE partition-key row movement, OLD is NULL when the event is
     * for a row being inserted, whereas NEW is NULL when the event is for a
     * row being deleted.
     */
    Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
             TupIsNull(oldslot)));
    Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
             TupIsNull(newslot)));

    if (!TupIsNull(oldslot))
    {
        Assert(TupIsNull(newslot));
        if (event == TRIGGER_EVENT_DELETE && delete_old_table)
            tuplestore = transition_capture->tcs_private->old_del_tuplestore;
        else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
            tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
    }
    else if (!TupIsNull(newslot))
    {
        Assert(TupIsNull(oldslot));
        if (event == TRIGGER_EVENT_INSERT && insert_new_table)
            tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
        else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
            tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
    }

    return tuplestore;
}
/*
 * Add the given heap tuple to the given tuplestore, applying the conversion
 * map if necessary.
 *
 * If original_insert_tuple is given, we can add that tuple without conversion.
 */
static void
TransitionTableAddTuple(EState *estate,
                        TransitionCaptureState *transition_capture,
                        ResultRelInfo *relinfo,
                        TupleTableSlot *slot,
                        TupleTableSlot *original_insert_tuple,
                        Tuplestorestate *tuplestore)
{
    TupleConversionMap *map;

    /*
     * Nothing needs to be done if we don't have a tuplestore.
     */
    if (tuplestore == NULL)
        return;

    if (original_insert_tuple)
        tuplestore_puttupleslot(tuplestore, original_insert_tuple);
    else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
    {
        AfterTriggersTableData *table = transition_capture->tcs_private;
        TupleTableSlot *storeslot;

        storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
        execute_attr_map_slot(map->attrMap, slot, storeslot);
        tuplestore_puttupleslot(tuplestore, storeslot);
    }
    else
        tuplestore_puttupleslot(tuplestore, slot);
}
/*
 * AfterTriggerEnlargeQueryState()
 *
 *  Prepare the necessary state so that we can record AFTER trigger events
 *  queued by a query.  It is allowed to have nested queries within a
 *  (sub)transaction, so we need to have separate state for each query
 *  nesting level.
 */
static void
AfterTriggerEnlargeQueryState(void)
{
    int         init_depth = afterTriggers.maxquerydepth;

    Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);

    if (afterTriggers.maxquerydepth == 0)
    {
        int         new_alloc = Max(afterTriggers.query_depth + 1, 8);

        afterTriggers.query_stack = (AfterTriggersQueryData *)
            MemoryContextAlloc(TopTransactionContext,
                               new_alloc * sizeof(AfterTriggersQueryData));
        afterTriggers.maxquerydepth = new_alloc;
    }
    else
    {
        /* repalloc will keep the stack in the same context */
        int         old_alloc = afterTriggers.maxquerydepth;
        int         new_alloc = Max(afterTriggers.query_depth + 1,
                                    old_alloc * 2);

        afterTriggers.query_stack = (AfterTriggersQueryData *)
            repalloc(afterTriggers.query_stack,
                     new_alloc * sizeof(AfterTriggersQueryData));
        afterTriggers.maxquerydepth = new_alloc;
    }

    /* Initialize new array entries to empty */
    while (init_depth < afterTriggers.maxquerydepth)
    {
        AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];

        qs->events.head = NULL;
        qs->events.tail = NULL;
        qs->events.tailfree = NULL;
        qs->fdw_tuplestore = NULL;
        qs->tables = NIL;

        ++init_depth;
    }
}
/*
 * Create an empty SetConstraintState with room for numalloc trigstates
 */
static SetConstraintState
SetConstraintStateCreate(int numalloc)
{
    SetConstraintState state;

    /* Behave sanely with numalloc == 0 */
    if (numalloc <= 0)
        numalloc = 1;

    /*
     * We assume that zeroing will correctly initialize the state values.
     */
    state = (SetConstraintState)
        MemoryContextAllocZero(TopTransactionContext,
                               offsetof(SetConstraintStateData, trigstates) +
                               numalloc * sizeof(SetConstraintTriggerData));

    state->numalloc = numalloc;

    return state;
}

/*
 * Copy a SetConstraintState
 */
static SetConstraintState
SetConstraintStateCopy(SetConstraintState origstate)
{
    SetConstraintState state;

    state = SetConstraintStateCreate(origstate->numstates);

    state->all_isset = origstate->all_isset;
    state->all_isdeferred = origstate->all_isdeferred;
    state->numstates = origstate->numstates;
    memcpy(state->trigstates, origstate->trigstates,
           origstate->numstates * sizeof(SetConstraintTriggerData));

    return state;
}

/*
 * Add a per-trigger item to a SetConstraintState.  Returns possibly-changed
 * pointer to the state object (it will change if we have to repalloc).
 */
static SetConstraintState
SetConstraintStateAddItem(SetConstraintState state,
                          Oid tgoid, bool tgisdeferred)
{
    if (state->numstates >= state->numalloc)
    {
        int         newalloc = state->numalloc * 2;

        newalloc = Max(newalloc, 8);    /* in case original has size 0 */
        state = (SetConstraintState)
            repalloc(state,
                     offsetof(SetConstraintStateData, trigstates) +
                     newalloc * sizeof(SetConstraintTriggerData));
        state->numalloc = newalloc;
        Assert(state->numstates < state->numalloc);
    }

    state->trigstates[state->numstates].sct_tgoid = tgoid;
    state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
    state->numstates++;

    return state;
}
5582 * AfterTriggerSetState()
5584 * Execute the SET CONSTRAINTS ... utility command.
5588 AfterTriggerSetState(ConstraintsSetStmt
*stmt
)
5590 int my_level
= GetCurrentTransactionNestLevel();
5592 /* If we haven't already done so, initialize our state. */
5593 if (afterTriggers
.state
== NULL
)
5594 afterTriggers
.state
= SetConstraintStateCreate(8);
5597 * If in a subtransaction, and we didn't save the current state already,
5598 * save it so it can be restored if the subtransaction aborts.
5601 afterTriggers
.trans_stack
[my_level
].state
== NULL
)
5603 afterTriggers
.trans_stack
[my_level
].state
=
5604 SetConstraintStateCopy(afterTriggers
.state
);
5608 * Handle SET CONSTRAINTS ALL ...
5610 if (stmt
->constraints
== NIL
)
5613 * Forget any previous SET CONSTRAINTS commands in this transaction.
5615 afterTriggers
.state
->numstates
= 0;
5618 * Set the per-transaction ALL state to known.
5620 afterTriggers
.state
->all_isset
= true;
5621 afterTriggers
.state
->all_isdeferred
= stmt
->deferred
;
5627 List
*conoidlist
= NIL
;
5628 List
*tgoidlist
= NIL
;
5632 * Handle SET CONSTRAINTS constraint-name [, ...]
5634 * First, identify all the named constraints and make a list of their
5635 * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5636 * the same name within a schema, the specifications are not
5637 * necessarily unique. Our strategy is to target all matching
5638 * constraints within the first search-path schema that has any
5639 * matches, but disregard matches in schemas beyond the first match.
5640 * (This is a bit odd but it's the historical behavior.)
5642 * A constraint in a partitioned table may have corresponding
5643 * constraints in the partitions. Grab those too.
5645 conrel
= table_open(ConstraintRelationId
, AccessShareLock
);
5647 foreach(lc
, stmt
->constraints
)
5649 RangeVar
*constraint
= lfirst(lc
);
5651 List
*namespacelist
;
5654 if (constraint
->catalogname
)
5656 if (strcmp(constraint
->catalogname
, get_database_name(MyDatabaseId
)) != 0)
5658 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED
),
5659 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5660 constraint
->catalogname
, constraint
->schemaname
,
5661 constraint
->relname
)));
5665 * If we're given the schema name with the constraint, look only
5666 * in that schema. If given a bare constraint name, use the
5667 * search path to find the first matching constraint.
5669 if (constraint
->schemaname
)
5671 Oid namespaceId
= LookupExplicitNamespace(constraint
->schemaname
,
5674 namespacelist
= list_make1_oid(namespaceId
);
5678 namespacelist
= fetch_search_path(true);
5682 foreach(nslc
, namespacelist
)
5684 Oid namespaceId
= lfirst_oid(nslc
);
5685 SysScanDesc conscan
;
5686 ScanKeyData skey
[2];
5689 ScanKeyInit(&skey
[0],
5690 Anum_pg_constraint_conname
,
5691 BTEqualStrategyNumber
, F_NAMEEQ
,
5692 CStringGetDatum(constraint
->relname
));
5693 ScanKeyInit(&skey
[1],
5694 Anum_pg_constraint_connamespace
,
5695 BTEqualStrategyNumber
, F_OIDEQ
,
5696 ObjectIdGetDatum(namespaceId
));
5698 conscan
= systable_beginscan(conrel
, ConstraintNameNspIndexId
,
5699 true, NULL
, 2, skey
);
5701 while (HeapTupleIsValid(tup
= systable_getnext(conscan
)))
5703 Form_pg_constraint con
= (Form_pg_constraint
) GETSTRUCT(tup
);
5705 if (con
->condeferrable
)
5706 conoidlist
= lappend_oid(conoidlist
, con
->oid
);
5707 else if (stmt
->deferred
)
5709 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
5710 errmsg("constraint \"%s\" is not deferrable",
5711 constraint
->relname
)));
5715 systable_endscan(conscan
);
5718 * Once we've found a matching constraint we do not search
5719 * later parts of the search path.
5725 list_free(namespacelist
);
5732 (errcode(ERRCODE_UNDEFINED_OBJECT
),
5733 errmsg("constraint \"%s\" does not exist",
5734 constraint
->relname
)));
5738 * Scan for any possible descendants of the constraints. We append
5739 * whatever we find to the same list that we're scanning; this has the
5740 * effect that we create new scans for those, too, so if there are
5741 * further descendents, we'll also catch them.
5743 foreach(lc
, conoidlist
)
5745 Oid parent
= lfirst_oid(lc
);
5751 Anum_pg_constraint_conparentid
,
5752 BTEqualStrategyNumber
, F_OIDEQ
,
5753 ObjectIdGetDatum(parent
));
5755 scan
= systable_beginscan(conrel
, ConstraintParentIndexId
, true, NULL
, 1, &key
);
5757 while (HeapTupleIsValid(tuple
= systable_getnext(scan
)))
5759 Form_pg_constraint con
= (Form_pg_constraint
) GETSTRUCT(tuple
);
5761 conoidlist
= lappend_oid(conoidlist
, con
->oid
);
5764 systable_endscan(scan
);
5767 table_close(conrel
, AccessShareLock
);
		/*
		 * Now, locate the trigger(s) implementing each of these constraints,
		 * and make a list of their OIDs.
		 */
		tgrel = table_open(TriggerRelationId, AccessShareLock);

		foreach(lc, conoidlist)
		{
			Oid			conoid = lfirst_oid(lc);
			ScanKeyData skey;
			SysScanDesc tgscan;
			HeapTuple	htup;

			ScanKeyInit(&skey,
						Anum_pg_trigger_tgconstraint,
						BTEqualStrategyNumber, F_OIDEQ,
						ObjectIdGetDatum(conoid));

			tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
										NULL, 1, &skey);

			while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
			{
				Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);

				/*
				 * Silently skip triggers that are marked as non-deferrable in
				 * pg_trigger.  This is not an error condition, since a
				 * deferrable RI constraint may have some non-deferrable
				 * actions.
				 */
				if (pg_trigger->tgdeferrable)
					tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
			}

			systable_endscan(tgscan);
		}

		table_close(tgrel, AccessShareLock);

		/*
		 * Now we can set the trigger states of individual triggers for this
		 * xact.
		 */
		foreach(lc, tgoidlist)
		{
			Oid			tgoid = lfirst_oid(lc);
			SetConstraintState state = afterTriggers.state;
			bool		found = false;
			int			i;

			for (i = 0; i < state->numstates; i++)
			{
				if (state->trigstates[i].sct_tgoid == tgoid)
				{
					state->trigstates[i].sct_tgisdeferred = stmt->deferred;
					found = true;
					break;
				}
			}
			if (!found)
			{
				afterTriggers.state =
					SetConstraintStateAddItem(state, tgoid, stmt->deferred);
			}
		}
	}
	/*
	 * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
	 * checks against that constraint must be made when the SET CONSTRAINTS
	 * command is executed -- i.e. the effects of the SET CONSTRAINTS command
	 * apply retroactively.  We've updated the constraints state, so scan the
	 * list of previously deferred events to fire any that have now become
	 * immediate.
	 *
	 * Obviously, if this was SET ... DEFERRED then it can't have converted
	 * any unfired events to immediate, so we need do nothing in that case.
	 */
	if (!stmt->deferred)
	{
		AfterTriggerEventList *events = &afterTriggers.events;
		bool		snapshot_set = false;

		while (afterTriggerMarkEvents(events, NULL, true))
		{
			CommandId	firing_id = afterTriggers.firing_counter++;

			/*
			 * Make sure a snapshot has been established in case trigger
			 * functions need one.  Note that we avoid setting a snapshot if
			 * we don't find at least one trigger that has to be fired now.
			 * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
			 * ISOLATION LEVEL SERIALIZABLE; ... works properly.  (If we are
			 * at the start of a transaction it's not possible for any trigger
			 * events to be queued yet.)
			 */
			if (!snapshot_set)
			{
				PushActiveSnapshot(GetTransactionSnapshot());
				snapshot_set = true;
			}

			/*
			 * We can delete fired events if we are at top transaction level,
			 * but we'd better not if inside a subtransaction, since the
			 * subtransaction could later get rolled back.
			 */
			if (afterTriggerInvokeEvents(events, firing_id, NULL,
										 !IsSubTransaction()))
				break;			/* all fired */
		}

		if (snapshot_set)
			PopActiveSnapshot();
	}
}
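
/*
 * Rough illustration of the retroactive behavior described above (schema and
 * names are examples only; assume child references parent via a DEFERRABLE
 * foreign key):
 *
 *   BEGIN;
 *   SET CONSTRAINTS ALL DEFERRED;
 *   INSERT INTO child VALUES (42);      -- no matching parent; check is queued
 *   SET CONSTRAINTS ALL IMMEDIATE;      -- queued check fires here and errors
 *   COMMIT;
 *
 * The failure surfaces at the SET CONSTRAINTS ... IMMEDIATE statement rather
 * than at COMMIT, because the loop above fires any events that have just
 * become immediate.
 */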
/*
 * AfterTriggerPendingOnRel()
 *		Test to see if there are any pending after-trigger events for rel.
 *
 * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
 * it is unsafe to perform major surgery on a relation.  Note that only
 * local pending events are examined.  We assume that having exclusive lock
 * on a rel guarantees there are no unserviced events in other backends ---
 * but having a lock does not prevent there being such events in our own.
 *
 * In some scenarios it'd be reasonable to remove pending events (more
 * specifically, mark them DONE by the current subxact) but without a lot
 * of knowledge of the trigger semantics we can't do this in general.
 */
bool
AfterTriggerPendingOnRel(Oid relid)
{
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;
	int			depth;

	/* Scan queued events */
	for_each_event_chunk(event, chunk, afterTriggers.events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);

		/*
		 * We can ignore completed events.  (Even if a DONE flag is rolled
		 * back by subxact abort, it's OK because the effects of the TRUNCATE
		 * or whatever must get rolled back too.)
		 */
		if (event->ate_flags & AFTER_TRIGGER_DONE)
			continue;

		if (evtshared->ats_relid == relid)
			return true;
	}

	/*
	 * Also scan events queued by incomplete queries.  This could only matter
	 * if TRUNCATE/etc is executed by a function or trigger within an updating
	 * query on the same relation, which is pretty perverse, but let's check.
	 */
	for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
	{
		for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			if (event->ate_flags & AFTER_TRIGGER_DONE)
				continue;

			if (evtshared->ats_relid == relid)
				return true;
		}
	}

	return false;
}
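
/*
 * Rough illustration (names are examples only; assume a DEFERRABLE foreign
 * key from child to parent, currently deferred):
 *
 *   BEGIN;
 *   SET CONSTRAINTS ALL DEFERRED;
 *   DELETE FROM parent WHERE id = 1;    -- queues deferred RI check events
 *   TRUNCATE parent;                    -- rejected: callers consult
 *                                       -- AfterTriggerPendingOnRel() first
 *
 * The error itself is raised by the callers (e.g. via CheckTableNotInUse()),
 * not by this function.
 */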
/*
 * AfterTriggerSaveEvent()
 *
 *	Called by ExecA[RS]...Triggers() to queue up the triggers that should
 *	be fired for an event.
 *
 *	NOTE: this is called whenever there are any triggers associated with
 *	the event (even if they are disabled).  This function decides which
 *	triggers actually need to be queued.  It is also called after each row,
 *	even if there are no triggers for that event, if there are any AFTER
 *	STATEMENT triggers for the statement which use transition tables, so that
 *	the transition tuplestores can be built.  Furthermore, if the transition
 *	capture is happening for UPDATEd rows being moved to another partition
 *	because the partition key changed, then this function is called once when
 *	the row is deleted (to capture the OLD row), and once when the row is
 *	inserted into the other partition (to capture the NEW row).  This is done
 *	separately because DELETE and INSERT happen on different tables.
 *
 *	Transition tuplestores are built now, rather than when events are pulled
 *	off of the queue, because AFTER ROW triggers are allowed to select from
 *	the transition tables for the statement.
 *
 *	This contains special support to queue the update events for the case
 *	where a partitioned table undergoing a cross-partition update may have
 *	foreign keys pointing into it.  Normally, a partitioned table's row
 *	triggers are not fired because the leaf partition(s) which are modified
 *	as a result of the operation on the partitioned table contain the same
 *	triggers which are fired instead.  But that general scheme can cause
 *	problematic behavior with foreign key triggers during cross-partition
 *	updates, which are implemented as a DELETE on the source partition
 *	followed by an INSERT into the destination partition.  Specifically,
 *	firing DELETE triggers would lead to the wrong foreign key action being
 *	enforced, considering that the original command is an UPDATE; in this
 *	case, this function is called with relinfo as the partitioned table, and
 *	src_partinfo and dst_partinfo referring to the source and target leaf
 *	partitions, respectively.
 *
 *	is_crosspart_update is true either when a DELETE event is fired on the
 *	source partition (which is to be ignored) or an UPDATE event is fired on
 *	the root partitioned table.
 */
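
/*
 * Rough illustration of the two situations described above (object names are
 * examples only, not taken from this file):
 *
 *   CREATE TRIGGER log_changes
 *     AFTER UPDATE ON measurements
 *     REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *     FOR EACH STATEMENT EXECUTE FUNCTION log_changes();
 *
 * Every affected row passes through this function so that old_rows/new_rows
 * can be populated, even when no AFTER ROW trigger exists.  And if
 * 'measurements' is partitioned and an UPDATE moves a row to a different
 * partition, the row is captured once as OLD (from the DELETE on the source
 * partition) and once as NEW (from the INSERT into the destination).
 */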
static void
AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
					  ResultRelInfo *src_partinfo,
					  ResultRelInfo *dst_partinfo,
					  int event, bool row_trigger,
					  TupleTableSlot *oldslot, TupleTableSlot *newslot,
					  List *recheckIndexes, Bitmapset *modifiedCols,
					  TransitionCaptureState *transition_capture,
					  bool is_crosspart_update)
{
	Relation	rel = relinfo->ri_RelationDesc;
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	AfterTriggerEventData new_event;
	AfterTriggerSharedData new_shared;
	char		relkind = rel->rd_rel->relkind;
	int			tgtype_event;
	int			tgtype_level;
	int			i;
	Tuplestorestate *fdw_tuplestore = NULL;

	/*
	 * Check state.  We use a normal test not Assert because it is possible
	 * to reach here in the wrong state given misconfigured RI triggers, in
	 * particular deferring a cascade action trigger.
	 */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "AfterTriggerSaveEvent() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();
	/*
	 * If the directly named relation has any triggers with transition
	 * tables, then we need to capture transition tuples.
	 */
	if (row_trigger && transition_capture != NULL)
	{
		TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;

		/*
		 * Capture the old tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(oldslot))
		{
			Tuplestorestate *old_tuplestore;

			old_tuplestore = GetAfterTriggersTransitionTable(event,
															 oldslot,
															 NULL,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									oldslot, NULL, old_tuplestore);
		}

		/*
		 * Capture the new tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(newslot))
		{
			Tuplestorestate *new_tuplestore;

			new_tuplestore = GetAfterTriggersTransitionTable(event,
															 NULL,
															 newslot,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									newslot, original_insert_tuple, new_tuplestore);
		}

		/*
		 * If transition tables are the only reason we're here, return.  As
		 * mentioned above, we can also be here during update tuple routing
		 * in presence of transition tables, in which case this function is
		 * called separately for OLD and NEW, so we expect exactly one of
		 * them to be NULL.
		 */
		if (trigdesc == NULL ||
			(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
			(event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
			return;
	}
	/*
	 * We normally don't see partitioned tables here for row level triggers
	 * except in the special case of a cross-partition update.  In that case,
	 * nodeModifyTable.c:ExecCrossPartitionUpdateForeignKey() calls here to
	 * queue an update event on the root target partitioned table, also
	 * passing the source and destination partitions and their tuples.
	 */
	Assert(!row_trigger ||
		   rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
		   (is_crosspart_update &&
			TRIGGER_FIRED_BY_UPDATE(event) &&
			src_partinfo != NULL && dst_partinfo != NULL));
	/*
	 * Validate the event code and collect the associated tuple CTIDs.
	 *
	 * The event code will be used both as a bitmask and an array offset, so
	 * validation is important to make sure we don't walk off the edge of our
	 * arrays.
	 *
	 * Also, if we're considering statement-level triggers, check whether we
	 * already queued a set of them for this event, and cancel the prior set
	 * if so.  This preserves the behavior that statement-level triggers fire
	 * just once per statement and fire after row-level triggers.
	 */
	switch (event)
	{
		case TRIGGER_EVENT_INSERT:
			tgtype_event = TRIGGER_TYPE_INSERT;
			if (row_trigger)
			{
				Assert(oldslot == NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_INSERT, event);
			}
			break;
		case TRIGGER_EVENT_DELETE:
			tgtype_event = TRIGGER_TYPE_DELETE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot == NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_DELETE, event);
			}
			break;
		case TRIGGER_EVENT_UPDATE:
			tgtype_event = TRIGGER_TYPE_UPDATE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));

				/*
				 * Also remember the OIDs of partitions to fetch these tuples
				 * out of later in AfterTriggerExecute().
				 */
				if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
				{
					Assert(src_partinfo != NULL && dst_partinfo != NULL);
					new_event.ate_src_part =
						RelationGetRelid(src_partinfo->ri_RelationDesc);
					new_event.ate_dst_part =
						RelationGetRelid(dst_partinfo->ri_RelationDesc);
				}
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_UPDATE, event);
			}
			break;
		case TRIGGER_EVENT_TRUNCATE:
			tgtype_event = TRIGGER_TYPE_TRUNCATE;
			Assert(oldslot == NULL);
			Assert(newslot == NULL);
			ItemPointerSetInvalid(&(new_event.ate_ctid1));
			ItemPointerSetInvalid(&(new_event.ate_ctid2));
			break;
		default:
			elog(ERROR, "invalid after-trigger event code: %d", event);
			tgtype_event = 0;	/* keep compiler quiet */
			break;
	}
	/* Determine flags */
	if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
	{
		if (row_trigger && event == TRIGGER_EVENT_UPDATE)
		{
			if (relkind == RELKIND_PARTITIONED_TABLE)
				new_event.ate_flags = AFTER_TRIGGER_CP_UPDATE;
			else
				new_event.ate_flags = AFTER_TRIGGER_2CTID;
		}
		else
			new_event.ate_flags = AFTER_TRIGGER_1CTID;
	}

	/* else, we'll initialize ate_flags for each trigger */

	tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);

	/*
	 * Must convert/copy the source and destination partition tuples into the
	 * root partitioned table's format/slot, because the processing in the
	 * loop below expects both oldslot and newslot tuples to be in that form.
	 */
	if (row_trigger && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
	{
		TupleTableSlot *rootslot;
		TupleConversionMap *map;

		rootslot = ExecGetTriggerOldSlot(estate, relinfo);
		map = ExecGetChildToRootMap(src_partinfo);
		if (map)
			oldslot = execute_attr_map_slot(map->attrMap,
											oldslot,
											rootslot);
		else
			oldslot = ExecCopySlot(rootslot, oldslot);

		rootslot = ExecGetTriggerNewSlot(estate, relinfo);
		map = ExecGetChildToRootMap(dst_partinfo);
		if (map)
			newslot = execute_attr_map_slot(map->attrMap,
											newslot,
											rootslot);
		else
			newslot = ExecCopySlot(rootslot, newslot);
	}
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  tgtype_level,
								  TRIGGER_TYPE_AFTER,
								  tgtype_event))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, event,
							modifiedCols, oldslot, newslot))
			continue;

		if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
		{
			if (fdw_tuplestore == NULL)
			{
				fdw_tuplestore = GetCurrentFDWTuplestore();
				new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
			}
			else
				/* subsequent event for the same tuple */
				new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
		}

		/*
		 * If the trigger is a foreign key enforcement trigger, there are
		 * certain cases where we can skip queueing the event because we can
		 * tell by inspection that the FK constraint will still pass.  There
		 * are also some cases during cross-partition updates of a
		 * partitioned table where queuing the event can be skipped.
		 */
		if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
		{
			switch (RI_FKey_trigger_type(trigger->tgfoid))
			{
				case RI_TRIGGER_PK:

					/*
					 * For cross-partitioned updates of partitioned PK table,
					 * skip the event fired by the component delete on the
					 * source leaf partition unless the constraint originates
					 * in the partition itself (!tgisclone), because the
					 * update event that will be fired on the root
					 * (partitioned) target table will be used to perform the
					 * necessary foreign key enforcement action.
					 */
					if (is_crosspart_update &&
						TRIGGER_FIRED_BY_DELETE(event) &&
						trigger->tgisclone)
						continue;

					/* Update or delete on trigger's PK table */
					if (!RI_FKey_pk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_FK:

					/*
					 * Update on trigger's FK table.  We can skip the update
					 * event fired on a partitioned table during a
					 * cross-partition update of that table, because the
					 * insert event that is fired on the destination leaf
					 * partition would suffice to perform the necessary
					 * foreign key check.  Moreover,
					 * RI_FKey_fk_upd_check_required() expects to be passed a
					 * tuple that contains system attributes, most of which
					 * are not present in the virtual slot belonging to a
					 * partitioned table.
					 */
					if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
						!RI_FKey_fk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_NONE:

					/*
					 * Not an FK trigger.  No need to queue the update event
					 * fired during a cross-partitioned update of a
					 * partitioned table, because the same row trigger must
					 * be present in the leaf partition(s) that are affected
					 * as part of this update and the events fired on them
					 * are queued instead.
					 */
					if (row_trigger &&
						rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
						continue;
					break;
			}
		}

		/*
		 * If the trigger is a deferred unique constraint check trigger, only
		 * queue it if the unique constraint was potentially violated, which
		 * we know from index insertion time.
		 */
		if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
		{
			if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
				continue;		/* Uniqueness definitely not violated */
		}
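
		/*
		 * (Illustrative aside; the object names below are examples only:
		 * such recheck triggers back deferrable unique and exclusion
		 * constraints, roughly as created by
		 *
		 *     ALTER TABLE t ADD CONSTRAINT t_k_uniq UNIQUE (k)
		 *         DEFERRABLE INITIALLY DEFERRED;
		 *
		 * and recheckIndexes names only the indexes whose insertion detected
		 * a possible duplicate, so conflict-free insertions queue nothing
		 * here.)
		 */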
		/*
		 * Fill in event structure and add it to the current query's queue.
		 * Note we set ats_table to NULL whenever this trigger doesn't use
		 * transition tables, to improve sharability of the shared event
		 * data.
		 */
		new_shared.ats_event =
			(event & TRIGGER_EVENT_OPMASK) |
			(row_trigger ? TRIGGER_EVENT_ROW : 0) |
			(trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
			(trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
		new_shared.ats_tgoid = trigger->tgoid;
		new_shared.ats_relid = RelationGetRelid(rel);
		new_shared.ats_firing_id = 0;
		if ((trigger->tgoldtable || trigger->tgnewtable) &&
			transition_capture != NULL)
			new_shared.ats_table = transition_capture->tcs_private;
		else
			new_shared.ats_table = NULL;
		new_shared.ats_modifiedcols = modifiedCols;

		afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
							 &new_event, &new_shared);
	}

	/*
	 * Finally, spool any foreign tuple(s).  The tuplestore squashes them to
	 * minimal tuples, so this loses any system columns.  The executor lost
	 * those columns before us, for an unrelated reason, so this is fine.
	 */
	if (fdw_tuplestore)
	{
		if (oldslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, oldslot);
		if (newslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, newslot);
	}
}
/*
 * Detect whether we already queued BEFORE STATEMENT triggers for the given
 * relation + operation, and set the flag so the next call will report "true".
 */
static bool
before_stmt_triggers_fired(Oid relid, CmdType cmdType)
{
	bool		result;
	AfterTriggersTableData *table;

	/* Check state, like AfterTriggerSaveEvent. */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "before_stmt_triggers_fired() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();

	/*
	 * We keep this state in the AfterTriggersTableData that also holds
	 * transition tables for the relation + operation.  In this way, if we
	 * are forced to make a new set of transition tables because more tuples
	 * get entered after we've already fired triggers, we will allow a new
	 * set of statement triggers to get queued.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);
	result = table->before_trig_done;
	table->before_trig_done = true;
	return result;
}
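
/*
 * Sketch of the intended effect (illustrative schema, not from this file):
 * when a single user statement causes several internal FK enforcement
 * queries against the same table -- say a DELETE on parent cascading into
 * child -- a BEFORE STATEMENT trigger on child such as
 *
 *   CREATE TRIGGER once_per_stmt BEFORE DELETE ON child
 *     FOR EACH STATEMENT EXECUTE FUNCTION note_delete();
 *
 * should fire only once.  The flag recorded above is what makes the second
 * and later enforcement queries report "already fired" and skip it.
 */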
/*
 * If we previously queued a set of AFTER STATEMENT triggers for the given
 * relation + operation, and they've not been fired yet, cancel them.  The
 * caller will queue a fresh set that's after any row-level triggers that may
 * have been queued by the current sub-statement, preserving (as much as
 * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
 * triggers, and that the latter only fire once.  This deals with the
 * situation where several FK enforcement triggers sequentially queue triggers
 * for the same table into the same trigger query level.  We can't fully
 * prevent odd behavior though: if there are AFTER ROW triggers taking
 * transition tables, we don't want to change the transition tables once the
 * first such trigger has seen them.  In such a case, any additional events
 * will result in creating new transition tables and allowing new firings of
 * statement triggers.
 *
 * This also saves the current event list location so that a later invocation
 * of this function can cheaply find the triggers we're about to queue and
 * not have to search the whole list.
 */
static void
cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
{
	AfterTriggersTableData *table;
	AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	/*
	 * We keep this state in the AfterTriggersTableData that also holds
	 * transition tables for the relation + operation.  In this way, if we
	 * are forced to make a new set of transition tables because more tuples
	 * get entered after we've already fired triggers, we will allow a new
	 * set of statement triggers to get queued without canceling the old
	 * ones.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	if (table->after_trig_done)
	{
		/*
		 * We want to start scanning from the tail location that existed just
		 * before we inserted any statement triggers.  But the events list
		 * might've been entirely empty then, in which case scan from the
		 * current head.
		 */
		AfterTriggerEvent event;
		AfterTriggerEventChunk *chunk;

		if (table->after_trig_events.tail)
		{
			chunk = table->after_trig_events.tail;
			event = (AfterTriggerEvent) table->after_trig_events.tailfree;
		}
		else
		{
			chunk = qs->events.head;
			event = NULL;
		}

		for_each_chunk_from(chunk)
		{
			if (event == NULL)
				event = (AfterTriggerEvent) CHUNK_DATA_START(chunk);
			for_each_event_from(event, chunk)
			{
				AfterTriggerShared evtshared = GetTriggerSharedData(event);

				/*
				 * Exit loop when we reach events that aren't AS triggers for
				 * the target relation.
				 */
				if (evtshared->ats_relid != relid)
					goto done;
				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent)
					goto done;
				if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event))
					goto done;
				if (!TRIGGER_FIRED_AFTER(evtshared->ats_event))
					goto done;
				/* OK, mark it DONE */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			/* signal we must reinitialize event ptr for next chunk */
			event = NULL;
		}
	}
done:

	/* In any case, save current insertion point for next time */
	table->after_trig_done = true;
	table->after_trig_events = qs->events;
}
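
/*
 * Continuing the sketch above (illustrative names only): with
 *
 *   CREATE TRIGGER child_stmt AFTER DELETE ON child
 *     FOR EACH STATEMENT EXECUTE FUNCTION report_deletes();
 *
 * each RI cascade query against child re-queues the statement trigger;
 * marking the earlier, unfired queue entries DONE here means that, as far as
 * possible, only the last set fires, and it fires after the row-level events
 * queued so far.
 */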
/*
 * GUC assign_hook for session_replication_role
 */
void
assign_session_replication_role(int newval, void *extra)
{
	/*
	 * Must flush the plan cache when changing replication role; but don't
	 * flush unnecessarily.
	 */
	if (SessionReplicationRole != newval)
		ResetPlanCache();
}
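
/*
 * For reference, this GUC is typically set by replication apply processes
 * and bulk-loading tools, e.g.:
 *
 *   SET session_replication_role = replica;
 *   -- ordinary (ENABLE) triggers and rules no longer fire;
 *   -- only ENABLE REPLICA / ENABLE ALWAYS ones do
 *   SET session_replication_role = DEFAULT;
 *
 * Cached plans can embed decisions that depend on the role, hence the
 * ResetPlanCache() call above when the value actually changes.
 */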
/*
 * SQL function pg_trigger_depth()
 */
Datum
pg_trigger_depth(PG_FUNCTION_ARGS)
{
	PG_RETURN_INT32(MyTriggerDepth);
}
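
/*
 * Typical SQL-level use (a common idiom; names are examples only): guarding
 * a trigger against firing on changes made by other triggers, e.g.
 *
 *   CREATE TRIGGER touch_parent
 *     AFTER UPDATE ON child FOR EACH ROW
 *     WHEN (pg_trigger_depth() = 0)
 *     EXECUTE FUNCTION touch_parent();
 *
 * pg_trigger_depth() returns 0 when not called (directly or indirectly) from
 * inside a trigger, and the current nesting level otherwise.
 */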