1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
12 * These four procedures are the external interface to the executor.
13 * In each case, the query descriptor is required as an argument.
15 * ExecutorStart must be called at the beginning of execution of any
16 * query plan and ExecutorEnd must always be called at the end of
17 * execution of a plan (unless it is aborted due to error).
19 * ExecutorRun accepts direction and count arguments that specify whether
20 * the plan is to be executed forwards, backwards, and for how many tuples.
21 * In some cases ExecutorRun may be called multiple times to process all
22 * the tuples for a plan. It is also acceptable to stop short of executing
23 * the whole plan (but only if it is a SELECT).
25 * ExecutorFinish must be called after the final ExecutorRun call and
26 * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 * which should also omit ExecutorRun.
29 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 * Portions Copyright (c) 1994, Regents of the University of California
34 * src/backend/executor/execMain.c
36 *-------------------------------------------------------------------------
40 #include "access/sysattr.h"
41 #include "access/table.h"
42 #include "access/tableam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "catalog/partition.h"
46 #include "commands/matview.h"
47 #include "commands/trigger.h"
48 #include "executor/executor.h"
49 #include "executor/nodeSubplan.h"
50 #include "foreign/fdwapi.h"
51 #include "mb/pg_wchar.h"
52 #include "miscadmin.h"
53 #include "nodes/queryjumble.h"
54 #include "parser/parse_relation.h"
56 #include "rewrite/rewriteHandler.h"
57 #include "tcop/utility.h"
58 #include "utils/acl.h"
59 #include "utils/backend_status.h"
60 #include "utils/lsyscache.h"
61 #include "utils/partcache.h"
62 #include "utils/rls.h"
63 #include "utils/snapmgr.h"
/*
 * Global hook pointers. Each defaults to NULL, meaning the corresponding
 * standard_Executor* routine (or ExecCheckPermissions) runs unwrapped; a
 * loaded extension may save the old value and install its own function.
 */
66 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
67 ExecutorStart_hook_type ExecutorStart_hook
= NULL
;
68 ExecutorRun_hook_type ExecutorRun_hook
= NULL
;
69 ExecutorFinish_hook_type ExecutorFinish_hook
= NULL
;
70 ExecutorEnd_hook_type ExecutorEnd_hook
= NULL
;
72 /* Hook for plugin to get control in ExecCheckPermissions() */
73 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook
= NULL
;
/*
 * Forward declarations for module-local (static) helpers.
 *
 * NOTE(review): several prototypes below look truncated by the extraction
 * that produced this chunk (e.g. ExecutePlan's and ExecCheckOneRelPerms'
 * parameter lists are cut short) — restore from upstream execMain.c before
 * compiling; TODO confirm against the original file.
 */
75 /* decls for local routines only used within this module */
76 static void InitPlan(QueryDesc
*queryDesc
, int eflags
);
77 static void CheckValidRowMarkRel(Relation rel
, RowMarkType markType
);
78 static void ExecPostprocessPlan(EState
*estate
);
79 static void ExecEndPlan(PlanState
*planstate
, EState
*estate
);
80 static void ExecutePlan(QueryDesc
*queryDesc
,
84 ScanDirection direction
,
86 static bool ExecCheckOneRelPerms(RTEPermissionInfo
*perminfo
);
87 static bool ExecCheckPermissionsModified(Oid relOid
, Oid userid
,
88 Bitmapset
*modifiedCols
,
89 AclMode requiredPerms
);
90 static void ExecCheckXactReadOnly(PlannedStmt
*plannedstmt
);
91 static void EvalPlanQualStart(EPQState
*epqstate
, Plan
*planTree
);
93 /* end of local decls */
/*
 * ExecutorStart: hook dispatcher. Reports the query_id for statistics, then
 * either invokes the ExecutorStart_hook (which is expected to call
 * standard_ExecutorStart itself) or calls standard_ExecutorStart directly.
 * NOTE(review): the return type line and braces appear dropped by extraction.
 */
96 /* ----------------------------------------------------------------
99 * This routine must be called at the beginning of any execution of any
102 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
103 * only because some places use QueryDescs for utility commands). The tupDesc
104 * field of the QueryDesc is filled in to describe the tuples that will be
105 * returned, and the internal fields (estate and planstate) are set up.
107 * eflags contains flag bits as described in executor.h.
109 * NB: the CurrentMemoryContext when this is called will become the parent
110 * of the per-query context used for this Executor invocation.
112 * We provide a function hook variable that lets loadable plugins
113 * get control when ExecutorStart is called. Such a plugin would
114 * normally call standard_ExecutorStart().
116 * ----------------------------------------------------------------
119 ExecutorStart(QueryDesc
*queryDesc
, int eflags
)
122 * In some cases (e.g. an EXECUTE statement or an execute message with the
123 * extended query protocol) the query_id won't be reported, so do it now.
125 * Note that it's harmless to report the query_id multiple times, as the
126 * call will be ignored if the top level query_id has already been
129 pgstat_report_query_id(queryDesc
->plannedstmt
->queryId
, false);
131 if (ExecutorStart_hook
)
132 (*ExecutorStart_hook
) (queryDesc
, eflags
);
134 standard_ExecutorStart(queryDesc
, eflags
);
/*
 * standard_ExecutorStart: the real startup work. Sanity-checks the
 * QueryDesc, enforces read-only/parallel-mode write restrictions, builds the
 * EState (registering snapshots, copying eflags/instrumentation/JIT flags),
 * chooses the output CommandId per operation type, opens an AFTER-trigger
 * query context unless skipped, and calls InitPlan to build the plan state
 * tree inside the per-query memory context.
 * NOTE(review): local declarations (e.g. EState *estate, int nParamExec) and
 * braces are missing from this extraction — restore from upstream.
 */
138 standard_ExecutorStart(QueryDesc
*queryDesc
, int eflags
)
141 MemoryContext oldcontext
;
143 /* sanity checks: queryDesc must not be started already */
144 Assert(queryDesc
!= NULL
);
145 Assert(queryDesc
->estate
== NULL
);
147 /* caller must ensure the query's snapshot is active */
148 Assert(GetActiveSnapshot() == queryDesc
->snapshot
);
151 * If the transaction is read-only, we need to check if any writes are
152 * planned to non-temporary tables. EXPLAIN is considered read-only.
154 * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
155 * would require (a) storing the combo CID hash in shared memory, rather
156 * than synchronizing it just once at the start of parallelism, and (b) an
157 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
158 * INSERT may have no such troubles, but we forbid it to simplify the
161 * We have lower-level defenses in CommandCounterIncrement and elsewhere
162 * against performing unsafe operations in parallel mode, but this gives a
163 * more user-friendly error message.
165 if ((XactReadOnly
|| IsInParallelMode()) &&
166 !(eflags
& EXEC_FLAG_EXPLAIN_ONLY
))
167 ExecCheckXactReadOnly(queryDesc
->plannedstmt
);
170 * Build EState, switch into per-query memory context for startup.
172 estate
= CreateExecutorState();
173 queryDesc
->estate
= estate
;
175 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
178 * Fill in external parameters, if any, from queryDesc; and allocate
179 * workspace for internal parameters
181 estate
->es_param_list_info
= queryDesc
->params
;
183 if (queryDesc
->plannedstmt
->paramExecTypes
!= NIL
)
187 nParamExec
= list_length(queryDesc
->plannedstmt
->paramExecTypes
);
188 estate
->es_param_exec_vals
= (ParamExecData
*)
189 palloc0(nParamExec
* sizeof(ParamExecData
));
192 /* We now require all callers to provide sourceText */
193 Assert(queryDesc
->sourceText
!= NULL
);
194 estate
->es_sourceText
= queryDesc
->sourceText
;
197 * Fill in the query environment, if any, from queryDesc.
199 estate
->es_queryEnv
= queryDesc
->queryEnv
;
202 * If non-read-only query, set the command ID to mark output tuples with
204 switch (queryDesc
->operation
)
209 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
212 if (queryDesc
->plannedstmt
->rowMarks
!= NIL
||
213 queryDesc
->plannedstmt
->hasModifyingCTE
)
214 estate
->es_output_cid
= GetCurrentCommandId(true);
217 * A SELECT without modifying CTEs can't possibly queue triggers,
218 * so force skip-triggers mode. This is just a marginal efficiency
219 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
220 * all that expensive, but we might as well do it.
222 if (!queryDesc
->plannedstmt
->hasModifyingCTE
)
223 eflags
|= EXEC_FLAG_SKIP_TRIGGERS
;
230 estate
->es_output_cid
= GetCurrentCommandId(true);
234 elog(ERROR
, "unrecognized operation code: %d",
235 (int) queryDesc
->operation
);
240 * Copy other important information into the EState
242 estate
->es_snapshot
= RegisterSnapshot(queryDesc
->snapshot
);
243 estate
->es_crosscheck_snapshot
= RegisterSnapshot(queryDesc
->crosscheck_snapshot
);
244 estate
->es_top_eflags
= eflags
;
245 estate
->es_instrument
= queryDesc
->instrument_options
;
246 estate
->es_jit_flags
= queryDesc
->plannedstmt
->jitFlags
;
249 * Set up an AFTER-trigger statement context, unless told not to, or
250 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
252 if (!(eflags
& (EXEC_FLAG_SKIP_TRIGGERS
| EXEC_FLAG_EXPLAIN_ONLY
)))
253 AfterTriggerBeginQuery();
256 * Initialize the plan state tree
258 InitPlan(queryDesc
, eflags
);
260 MemoryContextSwitchTo(oldcontext
);
/*
 * ExecutorRun: hook dispatcher for plan execution — calls ExecutorRun_hook
 * if installed, else standard_ExecutorRun, forwarding direction and count.
 */
263 /* ----------------------------------------------------------------
266 * This is the main routine of the executor module. It accepts
267 * the query descriptor from the traffic cop and executes the
270 * ExecutorStart must have been called already.
272 * If direction is NoMovementScanDirection then nothing is done
273 * except to start up/shut down the destination. Otherwise,
274 * we retrieve up to 'count' tuples in the specified direction.
276 * Note: count = 0 is interpreted as no portal limit, i.e., run to
277 * completion. Also note that the count limit is only applied to
278 * retrieved tuples, not for instance to those inserted/updated/deleted
279 * by a ModifyTable plan node.
281 * There is no return value, but output tuples (if any) are sent to
282 * the destination receiver specified in the QueryDesc; and the number
283 * of tuples processed at the top level can be found in
284 * estate->es_processed. The total number of tuples processed in all
285 * the ExecutorRun calls can be found in estate->es_total_processed.
287 * We provide a function hook variable that lets loadable plugins
288 * get control when ExecutorRun is called. Such a plugin would
289 * normally call standard_ExecutorRun().
291 * ----------------------------------------------------------------
294 ExecutorRun(QueryDesc
*queryDesc
,
295 ScanDirection direction
, uint64 count
)
297 if (ExecutorRun_hook
)
298 (*ExecutorRun_hook
) (queryDesc
, direction
, count
);
300 standard_ExecutorRun(queryDesc
, direction
, count
);
/*
 * standard_ExecutorRun: runs the plan inside the per-query memory context.
 * Resets es_processed, starts the DestReceiver, calls ExecutePlan unless
 * direction is NoMovement, accumulates es_total_processed, shuts the
 * receiver down, and wraps the whole thing in optional Instrumentation
 * (queryDesc->totaltime).
 * NOTE(review): local declarations (estate, operation, dest, sendTuples) and
 * some statements (e.g. the sendTuples guard around rStartup/rShutdown, the
 * rest of the ExecutePlan argument list) are missing from this extraction.
 */
304 standard_ExecutorRun(QueryDesc
*queryDesc
,
305 ScanDirection direction
, uint64 count
)
311 MemoryContext oldcontext
;
314 Assert(queryDesc
!= NULL
);
316 estate
= queryDesc
->estate
;
318 Assert(estate
!= NULL
);
319 Assert(!(estate
->es_top_eflags
& EXEC_FLAG_EXPLAIN_ONLY
));
321 /* caller must ensure the query's snapshot is active */
322 Assert(GetActiveSnapshot() == estate
->es_snapshot
);
325 * Switch into per-query memory context
327 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
329 /* Allow instrumentation of Executor overall runtime */
330 if (queryDesc
->totaltime
)
331 InstrStartNode(queryDesc
->totaltime
);
334 * extract information from the query descriptor and the query feature.
336 operation
= queryDesc
->operation
;
337 dest
= queryDesc
->dest
;
340 * startup tuple receiver, if we will be emitting tuples
342 estate
->es_processed
= 0;
344 sendTuples
= (operation
== CMD_SELECT
||
345 queryDesc
->plannedstmt
->hasReturning
);
348 dest
->rStartup(dest
, operation
, queryDesc
->tupDesc
);
351 * Run plan, unless direction is NoMovement.
353 * Note: pquery.c selects NoMovement if a prior call already reached
354 * end-of-data in the user-specified fetch direction. This is important
355 * because various parts of the executor can misbehave if called again
356 * after reporting EOF. For example, heapam.c would actually restart a
357 * heapscan and return all its data afresh. There is also some doubt
358 * about whether a parallel plan would operate properly if an additional,
359 * necessarily non-parallel execution request occurs after completing a
360 * parallel execution. (That case should work, but it's untested.)
362 if (!ScanDirectionIsNoMovement(direction
))
363 ExecutePlan(queryDesc
,
371 * Update es_total_processed to keep track of the number of tuples
372 * processed across multiple ExecutorRun() calls.
374 estate
->es_total_processed
+= estate
->es_processed
;
377 * shutdown tuple receiver, if we started it
380 dest
->rShutdown(dest
);
382 if (queryDesc
->totaltime
)
383 InstrStopNode(queryDesc
->totaltime
, estate
->es_processed
);
385 MemoryContextSwitchTo(oldcontext
);
/*
 * ExecutorFinish: hook dispatcher — calls ExecutorFinish_hook if installed,
 * else standard_ExecutorFinish.
 */
388 /* ----------------------------------------------------------------
391 * This routine must be called after the last ExecutorRun call.
392 * It performs cleanup such as firing AFTER triggers. It is
393 * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
394 * include these actions in the total runtime.
396 * We provide a function hook variable that lets loadable plugins
397 * get control when ExecutorFinish is called. Such a plugin would
398 * normally call standard_ExecutorFinish().
400 * ----------------------------------------------------------------
403 ExecutorFinish(QueryDesc
*queryDesc
)
405 if (ExecutorFinish_hook
)
406 (*ExecutorFinish_hook
) (queryDesc
);
408 standard_ExecutorFinish(queryDesc
);
/*
 * standard_ExecutorFinish: post-run cleanup that must be timed by EXPLAIN
 * ANALYZE. Drives ModifyTable nodes to completion (ExecPostprocessPlan),
 * fires queued AFTER triggers unless EXEC_FLAG_SKIP_TRIGGERS, and marks the
 * EState finished so standard_ExecutorEnd can Assert it was called.
 * Must run exactly once per executor instance (Assert on es_finished).
 */
412 standard_ExecutorFinish(QueryDesc
*queryDesc
)
415 MemoryContext oldcontext
;
418 Assert(queryDesc
!= NULL
);
420 estate
= queryDesc
->estate
;
422 Assert(estate
!= NULL
);
423 Assert(!(estate
->es_top_eflags
& EXEC_FLAG_EXPLAIN_ONLY
));
425 /* This should be run once and only once per Executor instance */
426 Assert(!estate
->es_finished
);
428 /* Switch into per-query memory context */
429 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
431 /* Allow instrumentation of Executor overall runtime */
432 if (queryDesc
->totaltime
)
433 InstrStartNode(queryDesc
->totaltime
);
435 /* Run ModifyTable nodes to completion */
436 ExecPostprocessPlan(estate
);
438 /* Execute queued AFTER triggers, unless told not to */
439 if (!(estate
->es_top_eflags
& EXEC_FLAG_SKIP_TRIGGERS
))
440 AfterTriggerEndQuery(estate
);
442 if (queryDesc
->totaltime
)
443 InstrStopNode(queryDesc
->totaltime
, 0);
445 MemoryContextSwitchTo(oldcontext
);
447 estate
->es_finished
= true;
/*
 * ExecutorEnd: hook dispatcher — calls ExecutorEnd_hook if installed,
 * else standard_ExecutorEnd.
 */
450 /* ----------------------------------------------------------------
453 * This routine must be called at the end of execution of any
456 * We provide a function hook variable that lets loadable plugins
457 * get control when ExecutorEnd is called. Such a plugin would
458 * normally call standard_ExecutorEnd().
460 * ----------------------------------------------------------------
463 ExecutorEnd(QueryDesc
*queryDesc
)
465 if (ExecutorEnd_hook
)
466 (*ExecutorEnd_hook
) (queryDesc
);
468 standard_ExecutorEnd(queryDesc
);
/*
 * standard_ExecutorEnd: final teardown. Reports parallel-worker launch
 * statistics, asserts ExecutorFinish ran (unless EXPLAIN-only), shuts down
 * the plan tree (ExecEndPlan), unregisters both snapshots, frees the EState
 * and its per-query memory context, and NULLs out the QueryDesc fields that
 * pointed into the freed context so stale pointers can't be dereferenced.
 */
472 standard_ExecutorEnd(QueryDesc
*queryDesc
)
475 MemoryContext oldcontext
;
478 Assert(queryDesc
!= NULL
);
480 estate
= queryDesc
->estate
;
482 Assert(estate
!= NULL
);
484 if (estate
->es_parallel_workers_to_launch
> 0)
485 pgstat_update_parallel_workers_stats((PgStat_Counter
) estate
->es_parallel_workers_to_launch
,
486 (PgStat_Counter
) estate
->es_parallel_workers_launched
);
489 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
490 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
491 * might forget to call it.
493 Assert(estate
->es_finished
||
494 (estate
->es_top_eflags
& EXEC_FLAG_EXPLAIN_ONLY
));
497 * Switch into per-query memory context to run ExecEndPlan
499 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
501 ExecEndPlan(queryDesc
->planstate
, estate
);
503 /* do away with our snapshots */
504 UnregisterSnapshot(estate
->es_snapshot
);
505 UnregisterSnapshot(estate
->es_crosscheck_snapshot
);
508 * Must switch out of context before destroying it
510 MemoryContextSwitchTo(oldcontext
);
513 * Release EState and per-query memory context. This should release
514 * everything the executor has allocated.
516 FreeExecutorState(estate
);
518 /* Reset queryDesc fields that no longer point to anything */
519 queryDesc
->tupDesc
= NULL
;
520 queryDesc
->estate
= NULL
;
521 queryDesc
->planstate
= NULL
;
522 queryDesc
->totaltime
= NULL
;
/*
 * ExecutorRewind: rewinds an open SELECT queryDesc back to its start by
 * calling ExecReScan on the top plan node, inside the per-query context.
 * Asserts the operation is CMD_SELECT — rescanning updating queries is
 * deliberately unsupported.
 */
525 /* ----------------------------------------------------------------
528 * This routine may be called on an open queryDesc to rewind it
530 * ----------------------------------------------------------------
533 ExecutorRewind(QueryDesc
*queryDesc
)
536 MemoryContext oldcontext
;
539 Assert(queryDesc
!= NULL
);
541 estate
= queryDesc
->estate
;
543 Assert(estate
!= NULL
);
545 /* It's probably not sensible to rescan updating queries */
546 Assert(queryDesc
->operation
== CMD_SELECT
);
549 * Switch into per-query memory context
551 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
556 ExecReScan(queryDesc
->planstate
);
558 MemoryContextSwitchTo(oldcontext
);
/*
 * ExecCheckPermissions: checks ACLs for every RTEPermissionInfo in the
 * query, then gives ExecutorCheckPerms_hook (if installed) a chance to veto
 * or relax the result. The assert-only first half cross-checks that each
 * RTE's perminfoindex maps one-to-one onto rteperminfos.
 * NOTE(review): the opening comment delimiter, local declarations (l,
 * result) and several braces/returns were dropped by extraction — restore
 * from upstream before compiling.
 */
563 * ExecCheckPermissions
564 * Check access permissions of relations mentioned in a query
566 * Returns true if permissions are adequate. Otherwise, throws an appropriate
567 * error if ereport_on_violation is true, or simply returns false otherwise.
569 * Note that this does NOT address row-level security policies (aka: RLS). If
570 * rows will be returned to the user as a result of this permission check
571 * passing, then RLS also needs to be consulted (and check_enable_rls()).
573 * See rewrite/rowsecurity.c.
575 * NB: rangeTable is no longer used by us, but kept around for the hooks that
576 * might still want to look at the RTEs.
579 ExecCheckPermissions(List
*rangeTable
, List
*rteperminfos
,
580 bool ereport_on_violation
)
585 #ifdef USE_ASSERT_CHECKING
586 Bitmapset
*indexset
= NULL
;
588 /* Check that rteperminfos is consistent with rangeTable */
589 foreach(l
, rangeTable
)
591 RangeTblEntry
*rte
= lfirst_node(RangeTblEntry
, l
);
593 if (rte
->perminfoindex
!= 0)
598 * Only relation RTEs and subquery RTEs that were once relation
599 * RTEs (views) have their perminfoindex set.
601 Assert(rte
->rtekind
== RTE_RELATION
||
602 (rte
->rtekind
== RTE_SUBQUERY
&&
603 rte
->relkind
== RELKIND_VIEW
));
605 (void) getRTEPermissionInfo(rteperminfos
, rte
);
606 /* Many-to-one mapping not allowed */
607 Assert(!bms_is_member(rte
->perminfoindex
, indexset
));
608 indexset
= bms_add_member(indexset
, rte
->perminfoindex
);
612 /* All rteperminfos are referenced */
613 Assert(bms_num_members(indexset
) == list_length(rteperminfos
));
616 foreach(l
, rteperminfos
)
618 RTEPermissionInfo
*perminfo
= lfirst_node(RTEPermissionInfo
, l
);
620 Assert(OidIsValid(perminfo
->relid
));
621 result
= ExecCheckOneRelPerms(perminfo
);
624 if (ereport_on_violation
)
625 aclcheck_error(ACLCHECK_NO_PRIV
,
626 get_relkind_objtype(get_rel_relkind(perminfo
->relid
)),
627 get_rel_name(perminfo
->relid
));
632 if (ExecutorCheckPerms_hook
)
633 result
= (*ExecutorCheckPerms_hook
) (rangeTable
, rteperminfos
,
634 ereport_on_violation
);
/*
 * ExecCheckOneRelPerms: checks one relation's ACLs for a single
 * RTEPermissionInfo. Relation-level grants are consulted first
 * (pg_class_aclmask); any requiredPerms bits they don't cover may still be
 * satisfied at column level for SELECT/INSERT/UPDATE — SELECT via
 * selectedCols here, INSERT/UPDATE delegated to
 * ExecCheckPermissionsModified. Column bitmap members are offset by
 * FirstLowInvalidHeapAttributeNumber; attno == InvalidAttrNumber encodes a
 * whole-row reference.
 * NOTE(review): local declarations (userid, relPerms, col) and the
 * return-false/return-true lines were dropped by extraction.
 */
639 * ExecCheckOneRelPerms
640 * Check access permissions for a single relation.
643 ExecCheckOneRelPerms(RTEPermissionInfo
*perminfo
)
645 AclMode requiredPerms
;
647 AclMode remainingPerms
;
649 Oid relOid
= perminfo
->relid
;
651 requiredPerms
= perminfo
->requiredPerms
;
652 Assert(requiredPerms
!= 0);
655 * userid to check as: current user unless we have a setuid indication.
657 * Note: GetUserId() is presently fast enough that there's no harm in
658 * calling it separately for each relation. If that stops being true, we
659 * could call it once in ExecCheckPermissions and pass the userid down
660 * from there. But for now, no need for the extra clutter.
662 userid
= OidIsValid(perminfo
->checkAsUser
) ?
663 perminfo
->checkAsUser
: GetUserId();
666 * We must have *all* the requiredPerms bits, but some of the bits can be
667 * satisfied from column-level rather than relation-level permissions.
668 * First, remove any bits that are satisfied by relation permissions.
670 relPerms
= pg_class_aclmask(relOid
, userid
, requiredPerms
, ACLMASK_ALL
);
671 remainingPerms
= requiredPerms
& ~relPerms
;
672 if (remainingPerms
!= 0)
677 * If we lack any permissions that exist only as relation permissions,
678 * we can fail straight away.
680 if (remainingPerms
& ~(ACL_SELECT
| ACL_INSERT
| ACL_UPDATE
))
684 * Check to see if we have the needed privileges at column level.
686 * Note: failures just report a table-level error; it would be nicer
687 * to report a column-level error if we have some but not all of the
690 if (remainingPerms
& ACL_SELECT
)
693 * When the query doesn't explicitly reference any columns (for
694 * example, SELECT COUNT(*) FROM table), allow the query if we
695 * have SELECT on any column of the rel, as per SQL spec.
697 if (bms_is_empty(perminfo
->selectedCols
))
699 if (pg_attribute_aclcheck_all(relOid
, userid
, ACL_SELECT
,
700 ACLMASK_ANY
) != ACLCHECK_OK
)
704 while ((col
= bms_next_member(perminfo
->selectedCols
, col
)) >= 0)
706 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
707 AttrNumber attno
= col
+ FirstLowInvalidHeapAttributeNumber
;
709 if (attno
== InvalidAttrNumber
)
711 /* Whole-row reference, must have priv on all cols */
712 if (pg_attribute_aclcheck_all(relOid
, userid
, ACL_SELECT
,
713 ACLMASK_ALL
) != ACLCHECK_OK
)
718 if (pg_attribute_aclcheck(relOid
, attno
, userid
,
719 ACL_SELECT
) != ACLCHECK_OK
)
726 * Basically the same for the mod columns, for both INSERT and UPDATE
727 * privilege as specified by remainingPerms.
729 if (remainingPerms
& ACL_INSERT
&&
730 !ExecCheckPermissionsModified(relOid
,
732 perminfo
->insertedCols
,
736 if (remainingPerms
& ACL_UPDATE
&&
737 !ExecCheckPermissionsModified(relOid
,
739 perminfo
->updatedCols
,
/*
 * ExecCheckPermissionsModified: column-level ACL check shared by INSERT and
 * UPDATE. Empty modifiedCols means "permission on any column suffices"
 * (covers SELECT FOR UPDATE); otherwise every listed column must pass
 * pg_attribute_aclcheck. Whole-row references are elog(ERROR)'d as
 * unimplemented here.
 * NOTE(review): the local 'col' declaration/initializer and the return
 * statements were dropped by extraction.
 */
747 * ExecCheckPermissionsModified
748 * Check INSERT or UPDATE access permissions for a single relation (these
749 * are processed uniformly).
752 ExecCheckPermissionsModified(Oid relOid
, Oid userid
, Bitmapset
*modifiedCols
,
753 AclMode requiredPerms
)
758 * When the query doesn't explicitly update any columns, allow the query
759 * if we have permission on any column of the rel. This is to handle
760 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
762 if (bms_is_empty(modifiedCols
))
764 if (pg_attribute_aclcheck_all(relOid
, userid
, requiredPerms
,
765 ACLMASK_ANY
) != ACLCHECK_OK
)
769 while ((col
= bms_next_member(modifiedCols
, col
)) >= 0)
771 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
772 AttrNumber attno
= col
+ FirstLowInvalidHeapAttributeNumber
;
774 if (attno
== InvalidAttrNumber
)
776 /* whole-row reference can't happen here */
777 elog(ERROR
, "whole-row update is not implemented");
781 if (pg_attribute_aclcheck(relOid
, attno
, userid
,
782 requiredPerms
) != ACLCHECK_OK
)
/*
 * ExecCheckXactReadOnly: raises an error if the planned statement would
 * write where it must not. Non-SELECT permInfos on non-temp relations are
 * rejected in a read-only transaction (PreventCommandIfReadOnly); any
 * write-capable statement (non-SELECT command type or a modifying CTE) is
 * rejected in parallel mode (PreventCommandIfParallelMode).
 */
790 * Check that the query does not imply any writes to non-temp tables;
791 * unless we're in parallel mode, in which case don't even allow writes
794 * Note: in a Hot Standby this would need to reject writes to temp
795 * tables just as we do in parallel mode; but an HS standby can't have created
796 * any temp tables in the first place, so no need to check that.
799 ExecCheckXactReadOnly(PlannedStmt
*plannedstmt
)
804 * Fail if write permissions are requested in parallel mode for table
805 * (temp or non-temp), otherwise fail for any non-temp table.
807 foreach(l
, plannedstmt
->permInfos
)
809 RTEPermissionInfo
*perminfo
= lfirst_node(RTEPermissionInfo
, l
);
811 if ((perminfo
->requiredPerms
& (~ACL_SELECT
)) == 0)
814 if (isTempNamespace(get_rel_namespace(perminfo
->relid
)))
817 PreventCommandIfReadOnly(CreateCommandName((Node
*) plannedstmt
));
820 if (plannedstmt
->commandType
!= CMD_SELECT
|| plannedstmt
->hasModifyingCTE
)
821 PreventCommandIfParallelMode(CreateCommandName((Node
*) plannedstmt
));
/*
 * InitPlan: builds the executor state tree for a query. In order: runs
 * permission checks, initializes the range table in the EState, builds the
 * es_rowmarks array from PlanRowMarks (opening and validating each marked
 * relation per markType), initializes each SubPlan's PlanState before the
 * main tree (ExecInitSubPlan requires this ordering), runs ExecInitNode on
 * the main plan, and finally installs a junk filter for SELECTs whose
 * top-level targetlist has junk attrs, recording tupDesc/planstate back
 * into the QueryDesc.
 * NOTE(review): numerous lines are missing from this extraction (loop
 * variable declarations, 'continue'/'break' statements, sp_eflags
 * initialization, erm->rti assignment, JunkFilter argument tail) — restore
 * from upstream execMain.c before compiling.
 */
825 /* ----------------------------------------------------------------
828 * Initializes the query plan: open files, allocate storage
829 * and start up the rule manager
830 * ----------------------------------------------------------------
833 InitPlan(QueryDesc
*queryDesc
, int eflags
)
835 CmdType operation
= queryDesc
->operation
;
836 PlannedStmt
*plannedstmt
= queryDesc
->plannedstmt
;
837 Plan
*plan
= plannedstmt
->planTree
;
838 List
*rangeTable
= plannedstmt
->rtable
;
839 EState
*estate
= queryDesc
->estate
;
840 PlanState
*planstate
;
846 * Do permissions checks
848 ExecCheckPermissions(rangeTable
, plannedstmt
->permInfos
, true);
851 * initialize the node's execution state
853 ExecInitRangeTable(estate
, rangeTable
, plannedstmt
->permInfos
);
855 estate
->es_plannedstmt
= plannedstmt
;
858 * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
860 if (plannedstmt
->rowMarks
)
862 estate
->es_rowmarks
= (ExecRowMark
**)
863 palloc0(estate
->es_range_table_size
* sizeof(ExecRowMark
*));
864 foreach(l
, plannedstmt
->rowMarks
)
866 PlanRowMark
*rc
= (PlanRowMark
*) lfirst(l
);
871 /* ignore "parent" rowmarks; they are irrelevant at runtime */
875 /* get relation's OID (will produce InvalidOid if subquery) */
876 relid
= exec_rt_fetch(rc
->rti
, estate
)->relid
;
878 /* open relation, if we need to access it for this mark type */
879 switch (rc
->markType
)
881 case ROW_MARK_EXCLUSIVE
:
882 case ROW_MARK_NOKEYEXCLUSIVE
:
884 case ROW_MARK_KEYSHARE
:
885 case ROW_MARK_REFERENCE
:
886 relation
= ExecGetRangeTableRelation(estate
, rc
->rti
);
889 /* no physical table access is required */
893 elog(ERROR
, "unrecognized markType: %d", rc
->markType
);
894 relation
= NULL
; /* keep compiler quiet */
898 /* Check that relation is a legal target for marking */
900 CheckValidRowMarkRel(relation
, rc
->markType
);
902 erm
= (ExecRowMark
*) palloc(sizeof(ExecRowMark
));
903 erm
->relation
= relation
;
906 erm
->prti
= rc
->prti
;
907 erm
->rowmarkId
= rc
->rowmarkId
;
908 erm
->markType
= rc
->markType
;
909 erm
->strength
= rc
->strength
;
910 erm
->waitPolicy
= rc
->waitPolicy
;
911 erm
->ermActive
= false;
912 ItemPointerSetInvalid(&(erm
->curCtid
));
913 erm
->ermExtra
= NULL
;
915 Assert(erm
->rti
> 0 && erm
->rti
<= estate
->es_range_table_size
&&
916 estate
->es_rowmarks
[erm
->rti
- 1] == NULL
);
918 estate
->es_rowmarks
[erm
->rti
- 1] = erm
;
923 * Initialize the executor's tuple table to empty.
925 estate
->es_tupleTable
= NIL
;
927 /* signal that this EState is not used for EPQ */
928 estate
->es_epq_active
= NULL
;
931 * Initialize private state information for each SubPlan. We must do this
932 * before running ExecInitNode on the main query tree, since
933 * ExecInitSubPlan expects to be able to find these entries.
935 Assert(estate
->es_subplanstates
== NIL
);
936 i
= 1; /* subplan indices count from 1 */
937 foreach(l
, plannedstmt
->subplans
)
939 Plan
*subplan
= (Plan
*) lfirst(l
);
940 PlanState
*subplanstate
;
944 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
945 * it is a parameterless subplan (not initplan), we suggest that it be
946 * prepared to handle REWIND efficiently; otherwise there is no need.
949 & ~(EXEC_FLAG_REWIND
| EXEC_FLAG_BACKWARD
| EXEC_FLAG_MARK
);
950 if (bms_is_member(i
, plannedstmt
->rewindPlanIDs
))
951 sp_eflags
|= EXEC_FLAG_REWIND
;
953 subplanstate
= ExecInitNode(subplan
, estate
, sp_eflags
);
955 estate
->es_subplanstates
= lappend(estate
->es_subplanstates
,
962 * Initialize the private state information for all the nodes in the query
963 * tree. This opens files, allocates storage and leaves us ready to start
966 planstate
= ExecInitNode(plan
, estate
, eflags
);
969 * Get the tuple descriptor describing the type of tuples to return.
971 tupType
= ExecGetResultType(planstate
);
974 * Initialize the junk filter if needed. SELECT queries need a filter if
975 * there are any junk attrs in the top-level tlist.
977 if (operation
== CMD_SELECT
)
979 bool junk_filter_needed
= false;
982 foreach(tlist
, plan
->targetlist
)
984 TargetEntry
*tle
= (TargetEntry
*) lfirst(tlist
);
988 junk_filter_needed
= true;
993 if (junk_filter_needed
)
996 TupleTableSlot
*slot
;
998 slot
= ExecInitExtraTupleSlot(estate
, NULL
, &TTSOpsVirtual
);
999 j
= ExecInitJunkFilter(planstate
->plan
->targetlist
,
1001 estate
->es_junkFilter
= j
;
1003 /* Want to return the cleaned tuple type */
1004 tupType
= j
->jf_cleanTupType
;
1008 queryDesc
->tupDesc
= tupType
;
1009 queryDesc
->planstate
= planstate
;
/*
 * CheckValidResultRel: dispatches on the result relation's relkind.
 * Ordinary/partitioned tables pass a replica-identity check; sequences and
 * TOAST relations are rejected outright; views require a suitable INSTEAD
 * OF trigger; matviews are writable only under incremental-maintenance
 * mode; foreign tables must have the FDW callback for the specific command
 * (ExecForeignInsert/Update/Delete) and pass IsForeignRelUpdatable's
 * per-command bit if that callback is provided. Anything else falls to the
 * generic "cannot change relation" error.
 * NOTE(review): 'ereport(ERROR,' opener lines, 'break's, case labels
 * (e.g. RELKIND_VIEW, the inner CMD_* switch) and the mergeActions
 * parameter line are missing from this extraction.
 */
1013 * Check that a proposed result relation is a legal target for the operation
1015 * Generally the parser and/or planner should have noticed any such mistake
1016 * already, but let's make sure.
1018 * For MERGE, mergeActions is the list of actions that may be performed. The
1019 * result relation is required to support every action, regardless of whether
1020 * or not they are all executed.
1022 * Note: when changing this function, you probably also need to look at
1023 * CheckValidRowMarkRel.
1026 CheckValidResultRel(ResultRelInfo
*resultRelInfo
, CmdType operation
,
1029 Relation resultRel
= resultRelInfo
->ri_RelationDesc
;
1030 FdwRoutine
*fdwroutine
;
1032 /* Expect a fully-formed ResultRelInfo from InitResultRelInfo(). */
1033 Assert(resultRelInfo
->ri_needLockTagTuple
==
1034 IsInplaceUpdateRelation(resultRel
));
1036 switch (resultRel
->rd_rel
->relkind
)
1038 case RELKIND_RELATION
:
1039 case RELKIND_PARTITIONED_TABLE
:
1040 CheckCmdReplicaIdentity(resultRel
, operation
);
1042 case RELKIND_SEQUENCE
:
1044 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1045 errmsg("cannot change sequence \"%s\"",
1046 RelationGetRelationName(resultRel
))));
1048 case RELKIND_TOASTVALUE
:
1050 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1051 errmsg("cannot change TOAST relation \"%s\"",
1052 RelationGetRelationName(resultRel
))));
1057 * Okay only if there's a suitable INSTEAD OF trigger. Otherwise,
1058 * complain, but omit errdetail because we haven't got the
1059 * information handy (and given that it really shouldn't happen,
1060 * it's not worth great exertion to get).
1062 if (!view_has_instead_trigger(resultRel
, operation
, mergeActions
))
1063 error_view_not_updatable(resultRel
, operation
, mergeActions
,
1066 case RELKIND_MATVIEW
:
1067 if (!MatViewIncrementalMaintenanceIsEnabled())
1069 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1070 errmsg("cannot change materialized view \"%s\"",
1071 RelationGetRelationName(resultRel
))));
1073 case RELKIND_FOREIGN_TABLE
:
1074 /* Okay only if the FDW supports it */
1075 fdwroutine
= resultRelInfo
->ri_FdwRoutine
;
1079 if (fdwroutine
->ExecForeignInsert
== NULL
)
1081 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED
),
1082 errmsg("cannot insert into foreign table \"%s\"",
1083 RelationGetRelationName(resultRel
))));
1084 if (fdwroutine
->IsForeignRelUpdatable
!= NULL
&&
1085 (fdwroutine
->IsForeignRelUpdatable(resultRel
) & (1 << CMD_INSERT
)) == 0)
1087 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE
),
1088 errmsg("foreign table \"%s\" does not allow inserts",
1089 RelationGetRelationName(resultRel
))));
1092 if (fdwroutine
->ExecForeignUpdate
== NULL
)
1094 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED
),
1095 errmsg("cannot update foreign table \"%s\"",
1096 RelationGetRelationName(resultRel
))));
1097 if (fdwroutine
->IsForeignRelUpdatable
!= NULL
&&
1098 (fdwroutine
->IsForeignRelUpdatable(resultRel
) & (1 << CMD_UPDATE
)) == 0)
1100 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE
),
1101 errmsg("foreign table \"%s\" does not allow updates",
1102 RelationGetRelationName(resultRel
))));
1105 if (fdwroutine
->ExecForeignDelete
== NULL
)
1107 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED
),
1108 errmsg("cannot delete from foreign table \"%s\"",
1109 RelationGetRelationName(resultRel
))));
1110 if (fdwroutine
->IsForeignRelUpdatable
!= NULL
&&
1111 (fdwroutine
->IsForeignRelUpdatable(resultRel
) & (1 << CMD_DELETE
)) == 0)
1113 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE
),
1114 errmsg("foreign table \"%s\" does not allow deletes",
1115 RelationGetRelationName(resultRel
))));
1118 elog(ERROR
, "unrecognized CmdType: %d", (int) operation
);
1124 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1125 errmsg("cannot change relation \"%s\"",
1126 RelationGetRelationName(resultRel
))));
/*
 * CheckValidRowMarkRel: validates a FOR [KEY] UPDATE/SHARE (or reference)
 * target by relkind. Plain/partitioned tables are OK; sequences, TOAST
 * relations and views are rejected; matviews allow only ROW_MARK_REFERENCE;
 * foreign tables need the FDW's RefetchForeignRow callback. Everything else
 * gets the generic "cannot lock rows in relation" error.
 * NOTE(review): 'ereport(ERROR,' openers, 'break's and the RELKIND_VIEW
 * case label are missing from this extraction.
 */
1132 * Check that a proposed rowmark target relation is a legal target
1134 * In most cases parser and/or planner should have noticed this already, but
1135 * they don't cover all cases.
1138 CheckValidRowMarkRel(Relation rel
, RowMarkType markType
)
1140 FdwRoutine
*fdwroutine
;
1142 switch (rel
->rd_rel
->relkind
)
1144 case RELKIND_RELATION
:
1145 case RELKIND_PARTITIONED_TABLE
:
1148 case RELKIND_SEQUENCE
:
1149 /* Must disallow this because we don't vacuum sequences */
1151 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1152 errmsg("cannot lock rows in sequence \"%s\"",
1153 RelationGetRelationName(rel
))));
1155 case RELKIND_TOASTVALUE
:
1156 /* We could allow this, but there seems no good reason to */
1158 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1159 errmsg("cannot lock rows in TOAST relation \"%s\"",
1160 RelationGetRelationName(rel
))));
1163 /* Should not get here; planner should have expanded the view */
1165 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1166 errmsg("cannot lock rows in view \"%s\"",
1167 RelationGetRelationName(rel
))));
1169 case RELKIND_MATVIEW
:
1170 /* Allow referencing a matview, but not actual locking clauses */
1171 if (markType
!= ROW_MARK_REFERENCE
)
1173 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1174 errmsg("cannot lock rows in materialized view \"%s\"",
1175 RelationGetRelationName(rel
))));
1177 case RELKIND_FOREIGN_TABLE
:
1178 /* Okay only if the FDW supports it */
1179 fdwroutine
= GetFdwRoutineForRelation(rel
, false);
1180 if (fdwroutine
->RefetchForeignRow
== NULL
)
1182 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED
),
1183 errmsg("cannot lock rows in foreign table \"%s\"",
1184 RelationGetRelationName(rel
))));
1188 (errcode(ERRCODE_WRONG_OBJECT_TYPE
),
1189 errmsg("cannot lock rows in relation \"%s\"",
1190 RelationGetRelationName(rel
))));
1196 * Initialize ResultRelInfo data for one result relation
1198 * Caution: before Postgres 9.1, this function included the relkind checking
1199 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1200 * appropriate. Be sure callers cover those needs.
1203 InitResultRelInfo(ResultRelInfo
*resultRelInfo
,
1204 Relation resultRelationDesc
,
1205 Index resultRelationIndex
,
1206 ResultRelInfo
*partition_root_rri
,
1207 int instrument_options
)
1209 MemSet(resultRelInfo
, 0, sizeof(ResultRelInfo
));
1210 resultRelInfo
->type
= T_ResultRelInfo
;
1211 resultRelInfo
->ri_RangeTableIndex
= resultRelationIndex
;
1212 resultRelInfo
->ri_RelationDesc
= resultRelationDesc
;
1213 resultRelInfo
->ri_NumIndices
= 0;
1214 resultRelInfo
->ri_IndexRelationDescs
= NULL
;
1215 resultRelInfo
->ri_IndexRelationInfo
= NULL
;
1216 resultRelInfo
->ri_needLockTagTuple
=
1217 IsInplaceUpdateRelation(resultRelationDesc
);
1218 /* make a copy so as not to depend on relcache info not changing... */
1219 resultRelInfo
->ri_TrigDesc
= CopyTriggerDesc(resultRelationDesc
->trigdesc
);
1220 if (resultRelInfo
->ri_TrigDesc
)
1222 int n
= resultRelInfo
->ri_TrigDesc
->numtriggers
;
1224 resultRelInfo
->ri_TrigFunctions
= (FmgrInfo
*)
1225 palloc0(n
* sizeof(FmgrInfo
));
1226 resultRelInfo
->ri_TrigWhenExprs
= (ExprState
**)
1227 palloc0(n
* sizeof(ExprState
*));
1228 if (instrument_options
)
1229 resultRelInfo
->ri_TrigInstrument
= InstrAlloc(n
, instrument_options
, false);
1233 resultRelInfo
->ri_TrigFunctions
= NULL
;
1234 resultRelInfo
->ri_TrigWhenExprs
= NULL
;
1235 resultRelInfo
->ri_TrigInstrument
= NULL
;
1237 if (resultRelationDesc
->rd_rel
->relkind
== RELKIND_FOREIGN_TABLE
)
1238 resultRelInfo
->ri_FdwRoutine
= GetFdwRoutineForRelation(resultRelationDesc
, true);
1240 resultRelInfo
->ri_FdwRoutine
= NULL
;
1242 /* The following fields are set later if needed */
1243 resultRelInfo
->ri_RowIdAttNo
= 0;
1244 resultRelInfo
->ri_extraUpdatedCols
= NULL
;
1245 resultRelInfo
->ri_projectNew
= NULL
;
1246 resultRelInfo
->ri_newTupleSlot
= NULL
;
1247 resultRelInfo
->ri_oldTupleSlot
= NULL
;
1248 resultRelInfo
->ri_projectNewInfoValid
= false;
1249 resultRelInfo
->ri_FdwState
= NULL
;
1250 resultRelInfo
->ri_usesFdwDirectModify
= false;
1251 resultRelInfo
->ri_ConstraintExprs
= NULL
;
1252 resultRelInfo
->ri_GeneratedExprsI
= NULL
;
1253 resultRelInfo
->ri_GeneratedExprsU
= NULL
;
1254 resultRelInfo
->ri_projectReturning
= NULL
;
1255 resultRelInfo
->ri_onConflictArbiterIndexes
= NIL
;
1256 resultRelInfo
->ri_onConflict
= NULL
;
1257 resultRelInfo
->ri_ReturningSlot
= NULL
;
1258 resultRelInfo
->ri_TrigOldSlot
= NULL
;
1259 resultRelInfo
->ri_TrigNewSlot
= NULL
;
1260 resultRelInfo
->ri_AllNullSlot
= NULL
;
1261 resultRelInfo
->ri_MergeActions
[MERGE_WHEN_MATCHED
] = NIL
;
1262 resultRelInfo
->ri_MergeActions
[MERGE_WHEN_NOT_MATCHED_BY_SOURCE
] = NIL
;
1263 resultRelInfo
->ri_MergeActions
[MERGE_WHEN_NOT_MATCHED_BY_TARGET
] = NIL
;
1264 resultRelInfo
->ri_MergeJoinCondition
= NULL
;
1267 * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
1268 * non-NULL partition_root_rri. For child relations that are part of the
1269 * initial query rather than being dynamically added by tuple routing,
1270 * this field is filled in ExecInitModifyTable().
1272 resultRelInfo
->ri_RootResultRelInfo
= partition_root_rri
;
1273 /* Set by ExecGetRootToChildMap */
1274 resultRelInfo
->ri_RootToChildMap
= NULL
;
1275 resultRelInfo
->ri_RootToChildMapValid
= false;
1276 /* Set by ExecInitRoutingInfo */
1277 resultRelInfo
->ri_PartitionTupleSlot
= NULL
;
1278 resultRelInfo
->ri_ChildToRootMap
= NULL
;
1279 resultRelInfo
->ri_ChildToRootMapValid
= false;
1280 resultRelInfo
->ri_CopyMultiInsertBuffer
= NULL
;
1284 * ExecGetTriggerResultRel
1285 * Get a ResultRelInfo for a trigger target relation.
1287 * Most of the time, triggers are fired on one of the result relations of the
1288 * query, and so we can just return a member of the es_result_relations array,
1289 * or the es_tuple_routing_result_relations list (if any). (Note: in self-join
1290 * situations there might be multiple members with the same OID; if so it
1291 * doesn't matter which one we pick.)
1293 * However, it is sometimes necessary to fire triggers on other relations;
1294 * this happens mainly when an RI update trigger queues additional triggers
1295 * on other relations, which will be processed in the context of the outer
1296 * query. For efficiency's sake, we want to have a ResultRelInfo for those
1297 * triggers too; that can avoid repeated re-opening of the relation. (It
1298 * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1299 * triggers.) So we make additional ResultRelInfo's as needed, and save them
1300 * in es_trig_target_relations.
1303 ExecGetTriggerResultRel(EState
*estate
, Oid relid
,
1304 ResultRelInfo
*rootRelInfo
)
1306 ResultRelInfo
*rInfo
;
1309 MemoryContext oldcontext
;
1311 /* Search through the query result relations */
1312 foreach(l
, estate
->es_opened_result_relations
)
1315 if (RelationGetRelid(rInfo
->ri_RelationDesc
) == relid
)
1320 * Search through the result relations that were created during tuple
1323 foreach(l
, estate
->es_tuple_routing_result_relations
)
1325 rInfo
= (ResultRelInfo
*) lfirst(l
);
1326 if (RelationGetRelid(rInfo
->ri_RelationDesc
) == relid
)
1330 /* Nope, but maybe we already made an extra ResultRelInfo for it */
1331 foreach(l
, estate
->es_trig_target_relations
)
1333 rInfo
= (ResultRelInfo
*) lfirst(l
);
1334 if (RelationGetRelid(rInfo
->ri_RelationDesc
) == relid
)
1337 /* Nope, so we need a new one */
1340 * Open the target relation's relcache entry. We assume that an
1341 * appropriate lock is still held by the backend from whenever the trigger
1342 * event got queued, so we need take no new lock here. Also, we need not
1343 * recheck the relkind, so no need for CheckValidResultRel.
1345 rel
= table_open(relid
, NoLock
);
1348 * Make the new entry in the right context.
1350 oldcontext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
1351 rInfo
= makeNode(ResultRelInfo
);
1352 InitResultRelInfo(rInfo
,
1354 0, /* dummy rangetable index */
1356 estate
->es_instrument
);
1357 estate
->es_trig_target_relations
=
1358 lappend(estate
->es_trig_target_relations
, rInfo
);
1359 MemoryContextSwitchTo(oldcontext
);
1362 * Currently, we don't need any index information in ResultRelInfos used
1363 * only for triggers, so no need to call ExecOpenIndices.
1370 * Return the ancestor relations of a given leaf partition result relation
1371 * up to and including the query's root target relation.
1373 * These work much like the ones opened by ExecGetTriggerResultRel, except
1374 * that we need to keep them in a separate list.
1376 * These are closed by ExecCloseResultRelations.
1379 ExecGetAncestorResultRels(EState
*estate
, ResultRelInfo
*resultRelInfo
)
1381 ResultRelInfo
*rootRelInfo
= resultRelInfo
->ri_RootResultRelInfo
;
1382 Relation partRel
= resultRelInfo
->ri_RelationDesc
;
1385 if (!partRel
->rd_rel
->relispartition
)
1386 elog(ERROR
, "cannot find ancestors of a non-partition result relation");
1387 Assert(rootRelInfo
!= NULL
);
1388 rootRelOid
= RelationGetRelid(rootRelInfo
->ri_RelationDesc
);
1389 if (resultRelInfo
->ri_ancestorResultRels
== NIL
)
1392 List
*oids
= get_partition_ancestors(RelationGetRelid(partRel
));
1393 List
*ancResultRels
= NIL
;
1397 Oid ancOid
= lfirst_oid(lc
);
1399 ResultRelInfo
*rInfo
;
1402 * Ignore the root ancestor here, and use ri_RootResultRelInfo
1403 * (below) for it instead. Also, we stop climbing up the
1404 * hierarchy when we find the table that was mentioned in the
1407 if (ancOid
== rootRelOid
)
1411 * All ancestors up to the root target relation must have been
1412 * locked by the planner or AcquireExecutorLocks().
1414 ancRel
= table_open(ancOid
, NoLock
);
1415 rInfo
= makeNode(ResultRelInfo
);
1417 /* dummy rangetable index */
1418 InitResultRelInfo(rInfo
, ancRel
, 0, NULL
,
1419 estate
->es_instrument
);
1420 ancResultRels
= lappend(ancResultRels
, rInfo
);
1422 ancResultRels
= lappend(ancResultRels
, rootRelInfo
);
1423 resultRelInfo
->ri_ancestorResultRels
= ancResultRels
;
1426 /* We must have found some ancestor */
1427 Assert(resultRelInfo
->ri_ancestorResultRels
!= NIL
);
1429 return resultRelInfo
->ri_ancestorResultRels
;
1432 /* ----------------------------------------------------------------
1433 * ExecPostprocessPlan
1435 * Give plan nodes a final chance to execute before shutdown
1436 * ----------------------------------------------------------------
1439 ExecPostprocessPlan(EState
*estate
)
1444 * Make sure nodes run forward.
1446 estate
->es_direction
= ForwardScanDirection
;
1449 * Run any secondary ModifyTable nodes to completion, in case the main
1450 * query did not fetch all rows from them. (We do this to ensure that
1451 * such nodes have predictable results.)
1453 foreach(lc
, estate
->es_auxmodifytables
)
1455 PlanState
*ps
= (PlanState
*) lfirst(lc
);
1459 TupleTableSlot
*slot
;
1461 /* Reset the per-output-tuple exprcontext each time */
1462 ResetPerTupleExprContext(estate
);
1464 slot
= ExecProcNode(ps
);
1466 if (TupIsNull(slot
))
1472 /* ----------------------------------------------------------------
1475 * Cleans up the query plan -- closes files and frees up storage
1477 * NOTE: we are no longer very worried about freeing storage per se
1478 * in this code; FreeExecutorState should be guaranteed to release all
1479 * memory that needs to be released. What we are worried about doing
1480 * is closing relations and dropping buffer pins. Thus, for example,
1481 * tuple tables must be cleared or dropped to ensure pins are released.
1482 * ----------------------------------------------------------------
1485 ExecEndPlan(PlanState
*planstate
, EState
*estate
)
1490 * shut down the node-type-specific query processing
1492 ExecEndNode(planstate
);
1497 foreach(l
, estate
->es_subplanstates
)
1499 PlanState
*subplanstate
= (PlanState
*) lfirst(l
);
1501 ExecEndNode(subplanstate
);
1505 * destroy the executor's tuple table. Actually we only care about
1506 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1507 * the TupleTableSlots, since the containing memory context is about to go
1510 ExecResetTupleTable(estate
->es_tupleTable
, false);
1513 * Close any Relations that have been opened for range table entries or
1516 ExecCloseResultRelations(estate
);
1517 ExecCloseRangeTableRelations(estate
);
1521 * Close any relations that have been opened for ResultRelInfos.
1524 ExecCloseResultRelations(EState
*estate
)
1529 * close indexes of result relation(s) if any. (Rels themselves are
1530 * closed in ExecCloseRangeTableRelations())
1532 * In addition, close the stub RTs that may be in each resultrel's
1533 * ri_ancestorResultRels.
1535 foreach(l
, estate
->es_opened_result_relations
)
1537 ResultRelInfo
*resultRelInfo
= lfirst(l
);
1540 ExecCloseIndices(resultRelInfo
);
1541 foreach(lc
, resultRelInfo
->ri_ancestorResultRels
)
1543 ResultRelInfo
*rInfo
= lfirst(lc
);
1546 * Ancestors with RTI > 0 (should only be the root ancestor) are
1547 * closed by ExecCloseRangeTableRelations.
1549 if (rInfo
->ri_RangeTableIndex
> 0)
1552 table_close(rInfo
->ri_RelationDesc
, NoLock
);
1556 /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
1557 foreach(l
, estate
->es_trig_target_relations
)
1559 ResultRelInfo
*resultRelInfo
= (ResultRelInfo
*) lfirst(l
);
1562 * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1563 * might be issuing a duplicate close against a Relation opened by
1564 * ExecGetRangeTableRelation.
1566 Assert(resultRelInfo
->ri_RangeTableIndex
== 0);
1569 * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1570 * these rels, we needn't call ExecCloseIndices either.
1572 Assert(resultRelInfo
->ri_NumIndices
== 0);
1574 table_close(resultRelInfo
->ri_RelationDesc
, NoLock
);
1579 * Close all relations opened by ExecGetRangeTableRelation().
1581 * We do not release any locks we might hold on those rels.
1584 ExecCloseRangeTableRelations(EState
*estate
)
1588 for (i
= 0; i
< estate
->es_range_table_size
; i
++)
1590 if (estate
->es_relations
[i
])
1591 table_close(estate
->es_relations
[i
], NoLock
);
1595 /* ----------------------------------------------------------------
1598 * Processes the query plan until we have retrieved 'numberTuples' tuples,
1599 * moving in the specified direction.
1601 * Runs to completion if numberTuples is 0
1602 * ----------------------------------------------------------------
1605 ExecutePlan(QueryDesc
*queryDesc
,
1608 uint64 numberTuples
,
1609 ScanDirection direction
,
1612 EState
*estate
= queryDesc
->estate
;
1613 PlanState
*planstate
= queryDesc
->planstate
;
1614 bool use_parallel_mode
;
1615 TupleTableSlot
*slot
;
1616 uint64 current_tuple_count
;
1619 * initialize local variables
1621 current_tuple_count
= 0;
1624 * Set the direction.
1626 estate
->es_direction
= direction
;
1629 * Set up parallel mode if appropriate.
1631 * Parallel mode only supports complete execution of a plan. If we've
1632 * already partially executed it, or if the caller asks us to exit early,
1633 * we must force the plan to run without parallelism.
1635 if (queryDesc
->already_executed
|| numberTuples
!= 0)
1636 use_parallel_mode
= false;
1638 use_parallel_mode
= queryDesc
->plannedstmt
->parallelModeNeeded
;
1639 queryDesc
->already_executed
= true;
1641 estate
->es_use_parallel_mode
= use_parallel_mode
;
1642 if (use_parallel_mode
)
1643 EnterParallelMode();
1646 * Loop until we've processed the proper number of tuples from the plan.
1650 /* Reset the per-output-tuple exprcontext */
1651 ResetPerTupleExprContext(estate
);
1654 * Execute the plan and obtain a tuple
1656 slot
= ExecProcNode(planstate
);
1659 * if the tuple is null, then we assume there is nothing more to
1660 * process so we just end the loop...
1662 if (TupIsNull(slot
))
1666 * If we have a junk filter, then project a new tuple with the junk
1669 * Store this new "clean" tuple in the junkfilter's resultSlot.
1670 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1671 * because that tuple slot has the wrong descriptor.)
1673 if (estate
->es_junkFilter
!= NULL
)
1674 slot
= ExecFilterJunk(estate
->es_junkFilter
, slot
);
1677 * If we are supposed to send the tuple somewhere, do so. (In
1678 * practice, this is probably always the case at this point.)
1683 * If we are not able to send the tuple, we assume the destination
1684 * has closed and no more tuples can be sent. If that's the case,
1687 if (!dest
->receiveSlot(slot
, dest
))
1692 * Count tuples processed, if this is a SELECT. (For other operation
1693 * types, the ModifyTable plan node must count the appropriate
1696 if (operation
== CMD_SELECT
)
1697 (estate
->es_processed
)++;
1700 * check our tuple count.. if we've processed the proper number then
1701 * quit, else loop again and process more tuples. Zero numberTuples
1704 current_tuple_count
++;
1705 if (numberTuples
&& numberTuples
== current_tuple_count
)
1710 * If we know we won't need to back up, we can release resources at this
1713 if (!(estate
->es_top_eflags
& EXEC_FLAG_BACKWARD
))
1714 ExecShutdownNode(planstate
);
1716 if (use_parallel_mode
)
1722 * ExecRelCheck --- check that tuple meets constraints for result relation
1724 * Returns NULL if OK, else name of failed check constraint
1727 ExecRelCheck(ResultRelInfo
*resultRelInfo
,
1728 TupleTableSlot
*slot
, EState
*estate
)
1730 Relation rel
= resultRelInfo
->ri_RelationDesc
;
1731 int ncheck
= rel
->rd_att
->constr
->num_check
;
1732 ConstrCheck
*check
= rel
->rd_att
->constr
->check
;
1733 ExprContext
*econtext
;
1734 MemoryContext oldContext
;
1738 * CheckConstraintFetch let this pass with only a warning, but now we
1739 * should fail rather than possibly failing to enforce an important
1742 if (ncheck
!= rel
->rd_rel
->relchecks
)
1743 elog(ERROR
, "%d pg_constraint record(s) missing for relation \"%s\"",
1744 rel
->rd_rel
->relchecks
- ncheck
, RelationGetRelationName(rel
));
1747 * If first time through for this result relation, build expression
1748 * nodetrees for rel's constraint expressions. Keep them in the per-query
1749 * memory context so they'll survive throughout the query.
1751 if (resultRelInfo
->ri_ConstraintExprs
== NULL
)
1753 oldContext
= MemoryContextSwitchTo(estate
->es_query_cxt
);
1754 resultRelInfo
->ri_ConstraintExprs
=
1755 (ExprState
**) palloc0(ncheck
* sizeof(ExprState
*));
1756 for (i
= 0; i
< ncheck
; i
++)
1760 /* Skip not enforced constraint */
1761 if (!check
[i
].ccenforced
)
1764 checkconstr
= stringToNode(check
[i
].ccbin
);
1765 resultRelInfo
->ri_ConstraintExprs
[i
] =
1766 ExecPrepareExpr(checkconstr
, estate
);
1768 MemoryContextSwitchTo(oldContext
);
1772 * We will use the EState's per-tuple context for evaluating constraint
1773 * expressions (creating it if it's not already there).
1775 econtext
= GetPerTupleExprContext(estate
);
1777 /* Arrange for econtext's scan tuple to be the tuple under test */
1778 econtext
->ecxt_scantuple
= slot
;
1780 /* And evaluate the constraints */
1781 for (i
= 0; i
< ncheck
; i
++)
1783 ExprState
*checkconstr
= resultRelInfo
->ri_ConstraintExprs
[i
];
1786 * NOTE: SQL specifies that a NULL result from a constraint expression
1787 * is not to be treated as a failure. Therefore, use ExecCheck not
1790 if (checkconstr
&& !ExecCheck(checkconstr
, econtext
))
1791 return check
[i
].ccname
;
1794 /* NULL result means no error */
1799 * ExecPartitionCheck --- check that tuple meets the partition constraint.
1801 * Returns true if it meets the partition constraint. If the constraint
1802 * fails and we're asked to emit an error, do so and don't return; otherwise
1806 ExecPartitionCheck(ResultRelInfo
*resultRelInfo
, TupleTableSlot
*slot
,
1807 EState
*estate
, bool emitError
)
1809 ExprContext
*econtext
;
1813 * If first time through, build expression state tree for the partition
1814 * check expression. (In the corner case where the partition check
1815 * expression is empty, ie there's a default partition and nothing else,
1816 * we'll be fooled into executing this code each time through. But it's
1817 * pretty darn cheap in that case, so we don't worry about it.)
1819 if (resultRelInfo
->ri_PartitionCheckExpr
== NULL
)
1822 * Ensure that the qual tree and prepared expression are in the
1823 * query-lifespan context.
1825 MemoryContext oldcxt
= MemoryContextSwitchTo(estate
->es_query_cxt
);
1826 List
*qual
= RelationGetPartitionQual(resultRelInfo
->ri_RelationDesc
);
1828 resultRelInfo
->ri_PartitionCheckExpr
= ExecPrepareCheck(qual
, estate
);
1829 MemoryContextSwitchTo(oldcxt
);
1833 * We will use the EState's per-tuple context for evaluating constraint
1834 * expressions (creating it if it's not already there).
1836 econtext
= GetPerTupleExprContext(estate
);
1838 /* Arrange for econtext's scan tuple to be the tuple under test */
1839 econtext
->ecxt_scantuple
= slot
;
1842 * As in case of the cataloged constraints, we treat a NULL result as
1843 * success here, not a failure.
1845 success
= ExecCheck(resultRelInfo
->ri_PartitionCheckExpr
, econtext
);
1847 /* if asked to emit error, don't actually return on failure */
1848 if (!success
&& emitError
)
1849 ExecPartitionCheckEmitError(resultRelInfo
, slot
, estate
);
1855 * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1856 * partition constraint check.
1859 ExecPartitionCheckEmitError(ResultRelInfo
*resultRelInfo
,
1860 TupleTableSlot
*slot
,
1866 Bitmapset
*modifiedCols
;
1869 * If the tuple has been routed, it's been converted to the partition's
1870 * rowtype, which might differ from the root table's. We must convert it
1871 * back to the root table's rowtype so that val_desc in the error message
1872 * matches the input tuple.
1874 if (resultRelInfo
->ri_RootResultRelInfo
)
1876 ResultRelInfo
*rootrel
= resultRelInfo
->ri_RootResultRelInfo
;
1877 TupleDesc old_tupdesc
;
1880 root_relid
= RelationGetRelid(rootrel
->ri_RelationDesc
);
1881 tupdesc
= RelationGetDescr(rootrel
->ri_RelationDesc
);
1883 old_tupdesc
= RelationGetDescr(resultRelInfo
->ri_RelationDesc
);
1885 map
= build_attrmap_by_name_if_req(old_tupdesc
, tupdesc
, false);
1888 * Partition-specific slot's tupdesc can't be changed, so allocate a
1892 slot
= execute_attr_map_slot(map
, slot
,
1893 MakeTupleTableSlot(tupdesc
, &TTSOpsVirtual
));
1894 modifiedCols
= bms_union(ExecGetInsertedCols(rootrel
, estate
),
1895 ExecGetUpdatedCols(rootrel
, estate
));
1899 root_relid
= RelationGetRelid(resultRelInfo
->ri_RelationDesc
);
1900 tupdesc
= RelationGetDescr(resultRelInfo
->ri_RelationDesc
);
1901 modifiedCols
= bms_union(ExecGetInsertedCols(resultRelInfo
, estate
),
1902 ExecGetUpdatedCols(resultRelInfo
, estate
));
1905 val_desc
= ExecBuildSlotValueDescription(root_relid
,
1911 (errcode(ERRCODE_CHECK_VIOLATION
),
1912 errmsg("new row for relation \"%s\" violates partition constraint",
1913 RelationGetRelationName(resultRelInfo
->ri_RelationDesc
)),
1914 val_desc
? errdetail("Failing row contains %s.", val_desc
) : 0,
1915 errtable(resultRelInfo
->ri_RelationDesc
)));
1919 * ExecConstraints - check constraints of the tuple in 'slot'
1921 * This checks the traditional NOT NULL and check constraints.
1923 * The partition constraint is *NOT* checked.
1925 * Note: 'slot' contains the tuple to check the constraints of, which may
1926 * have been converted from the original input tuple after tuple routing.
1927 * 'resultRelInfo' is the final result relation, after tuple routing.
1930 ExecConstraints(ResultRelInfo
*resultRelInfo
,
1931 TupleTableSlot
*slot
, EState
*estate
)
1933 Relation rel
= resultRelInfo
->ri_RelationDesc
;
1934 TupleDesc tupdesc
= RelationGetDescr(rel
);
1935 TupleConstr
*constr
= tupdesc
->constr
;
1936 Bitmapset
*modifiedCols
;
1938 Assert(constr
); /* we should not be called otherwise */
1940 if (constr
->has_not_null
)
1942 int natts
= tupdesc
->natts
;
1945 for (attrChk
= 1; attrChk
<= natts
; attrChk
++)
1947 Form_pg_attribute att
= TupleDescAttr(tupdesc
, attrChk
- 1);
1949 if (att
->attnotnull
&& slot_attisnull(slot
, attrChk
))
1952 Relation orig_rel
= rel
;
1953 TupleDesc orig_tupdesc
= RelationGetDescr(rel
);
1956 * If the tuple has been routed, it's been converted to the
1957 * partition's rowtype, which might differ from the root
1958 * table's. We must convert it back to the root table's
1959 * rowtype so that val_desc shown error message matches the
1962 if (resultRelInfo
->ri_RootResultRelInfo
)
1964 ResultRelInfo
*rootrel
= resultRelInfo
->ri_RootResultRelInfo
;
1967 tupdesc
= RelationGetDescr(rootrel
->ri_RelationDesc
);
1969 map
= build_attrmap_by_name_if_req(orig_tupdesc
,
1974 * Partition-specific slot's tupdesc can't be changed, so
1975 * allocate a new one.
1978 slot
= execute_attr_map_slot(map
, slot
,
1979 MakeTupleTableSlot(tupdesc
, &TTSOpsVirtual
));
1980 modifiedCols
= bms_union(ExecGetInsertedCols(rootrel
, estate
),
1981 ExecGetUpdatedCols(rootrel
, estate
));
1982 rel
= rootrel
->ri_RelationDesc
;
1985 modifiedCols
= bms_union(ExecGetInsertedCols(resultRelInfo
, estate
),
1986 ExecGetUpdatedCols(resultRelInfo
, estate
));
1987 val_desc
= ExecBuildSlotValueDescription(RelationGetRelid(rel
),
1994 (errcode(ERRCODE_NOT_NULL_VIOLATION
),
1995 errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
1996 NameStr(att
->attname
),
1997 RelationGetRelationName(orig_rel
)),
1998 val_desc
? errdetail("Failing row contains %s.", val_desc
) : 0,
1999 errtablecol(orig_rel
, attrChk
)));
2004 if (rel
->rd_rel
->relchecks
> 0)
2008 if ((failed
= ExecRelCheck(resultRelInfo
, slot
, estate
)) != NULL
)
2011 Relation orig_rel
= rel
;
2013 /* See the comment above. */
2014 if (resultRelInfo
->ri_RootResultRelInfo
)
2016 ResultRelInfo
*rootrel
= resultRelInfo
->ri_RootResultRelInfo
;
2017 TupleDesc old_tupdesc
= RelationGetDescr(rel
);
2020 tupdesc
= RelationGetDescr(rootrel
->ri_RelationDesc
);
2022 map
= build_attrmap_by_name_if_req(old_tupdesc
,
2027 * Partition-specific slot's tupdesc can't be changed, so
2028 * allocate a new one.
2031 slot
= execute_attr_map_slot(map
, slot
,
2032 MakeTupleTableSlot(tupdesc
, &TTSOpsVirtual
));
2033 modifiedCols
= bms_union(ExecGetInsertedCols(rootrel
, estate
),
2034 ExecGetUpdatedCols(rootrel
, estate
));
2035 rel
= rootrel
->ri_RelationDesc
;
2038 modifiedCols
= bms_union(ExecGetInsertedCols(resultRelInfo
, estate
),
2039 ExecGetUpdatedCols(resultRelInfo
, estate
));
2040 val_desc
= ExecBuildSlotValueDescription(RelationGetRelid(rel
),
2046 (errcode(ERRCODE_CHECK_VIOLATION
),
2047 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2048 RelationGetRelationName(orig_rel
), failed
),
2049 val_desc
? errdetail("Failing row contains %s.", val_desc
) : 0,
2050 errtableconstraint(orig_rel
, failed
)));
2056 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2057 * of the specified kind.
2059 * Note that this needs to be called multiple times to ensure that all kinds of
2060 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2061 * CHECK OPTION set and from row-level security policies). See ExecInsert()
2065 ExecWithCheckOptions(WCOKind kind
, ResultRelInfo
*resultRelInfo
,
2066 TupleTableSlot
*slot
, EState
*estate
)
2068 Relation rel
= resultRelInfo
->ri_RelationDesc
;
2069 TupleDesc tupdesc
= RelationGetDescr(rel
);
2070 ExprContext
*econtext
;
2075 * We will use the EState's per-tuple context for evaluating constraint
2076 * expressions (creating it if it's not already there).
2078 econtext
= GetPerTupleExprContext(estate
);
2080 /* Arrange for econtext's scan tuple to be the tuple under test */
2081 econtext
->ecxt_scantuple
= slot
;
2083 /* Check each of the constraints */
2084 forboth(l1
, resultRelInfo
->ri_WithCheckOptions
,
2085 l2
, resultRelInfo
->ri_WithCheckOptionExprs
)
2087 WithCheckOption
*wco
= (WithCheckOption
*) lfirst(l1
);
2088 ExprState
*wcoExpr
= (ExprState
*) lfirst(l2
);
2091 * Skip any WCOs which are not the kind we are looking for at this
2094 if (wco
->kind
!= kind
)
2098 * WITH CHECK OPTION checks are intended to ensure that the new tuple
2099 * is visible (in the case of a view) or that it passes the
2100 * 'with-check' policy (in the case of row security). If the qual
2101 * evaluates to NULL or FALSE, then the new tuple won't be included in
2102 * the view or doesn't pass the 'with-check' policy for the table.
2104 if (!ExecQual(wcoExpr
, econtext
))
2107 Bitmapset
*modifiedCols
;
2112 * For WITH CHECK OPTIONs coming from views, we might be
2113 * able to provide the details on the row, depending on
2114 * the permissions on the relation (that is, if the user
2115 * could view it directly anyway). For RLS violations, we
2116 * don't include the data since we don't know if the user
2117 * should be able to view the tuple as that depends on the
2120 case WCO_VIEW_CHECK
:
2121 /* See the comment in ExecConstraints(). */
2122 if (resultRelInfo
->ri_RootResultRelInfo
)
2124 ResultRelInfo
*rootrel
= resultRelInfo
->ri_RootResultRelInfo
;
2125 TupleDesc old_tupdesc
= RelationGetDescr(rel
);
2128 tupdesc
= RelationGetDescr(rootrel
->ri_RelationDesc
);
2130 map
= build_attrmap_by_name_if_req(old_tupdesc
,
2135 * Partition-specific slot's tupdesc can't be changed,
2136 * so allocate a new one.
2139 slot
= execute_attr_map_slot(map
, slot
,
2140 MakeTupleTableSlot(tupdesc
, &TTSOpsVirtual
));
2142 modifiedCols
= bms_union(ExecGetInsertedCols(rootrel
, estate
),
2143 ExecGetUpdatedCols(rootrel
, estate
));
2144 rel
= rootrel
->ri_RelationDesc
;
2147 modifiedCols
= bms_union(ExecGetInsertedCols(resultRelInfo
, estate
),
2148 ExecGetUpdatedCols(resultRelInfo
, estate
));
2149 val_desc
= ExecBuildSlotValueDescription(RelationGetRelid(rel
),
2156 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION
),
2157 errmsg("new row violates check option for view \"%s\"",
2159 val_desc
? errdetail("Failing row contains %s.",
2162 case WCO_RLS_INSERT_CHECK
:
2163 case WCO_RLS_UPDATE_CHECK
:
2164 if (wco
->polname
!= NULL
)
2166 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2167 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2168 wco
->polname
, wco
->relname
)));
2171 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2172 errmsg("new row violates row-level security policy for table \"%s\"",
2175 case WCO_RLS_MERGE_UPDATE_CHECK
:
2176 case WCO_RLS_MERGE_DELETE_CHECK
:
2177 if (wco
->polname
!= NULL
)
2179 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2180 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2181 wco
->polname
, wco
->relname
)));
2184 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2185 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
2188 case WCO_RLS_CONFLICT_CHECK
:
2189 if (wco
->polname
!= NULL
)
2191 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2192 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2193 wco
->polname
, wco
->relname
)));
2196 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE
),
2197 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2201 elog(ERROR
, "unrecognized WCO kind: %u", wco
->kind
);
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * column involved, that subset will be returned with a key identifying which
 * columns they are.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;			/* accumulates "(v1, v2, ...)" */
	StringInfoData collist;		/* column-name key, used only without table perm */
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false;
	bool		any_perm = false;

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				/* look up the type's output function to render the datum */
				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendBinaryStringInfo(&buf, val, vallen);
			else
			{
				/* clip at a multibyte character boundary, then add ellipsis */
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* prefix the values with the per-column key, e.g. "(a, b) = (1, 2)" */
		appendStringInfoString(&collist, ") = ");
		appendBinaryStringInfo(&collist, buf.data, buf.len);

		return collist.data;
	}

	return buf.data;
}
2361 * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2362 * given ResultRelInfo
2365 ExecUpdateLockMode(EState
*estate
, ResultRelInfo
*relinfo
)
2368 Bitmapset
*updatedCols
;
2371 * Compute lock mode to use. If columns that are part of the key have not
2372 * been modified, then we can use a weaker lock, allowing for better
2375 updatedCols
= ExecGetAllUpdatedCols(relinfo
, estate
);
2376 keyCols
= RelationGetIndexAttrBitmap(relinfo
->ri_RelationDesc
,
2377 INDEX_ATTR_BITMAP_KEY
);
2379 if (bms_overlap(keyCols
, updatedCols
))
2380 return LockTupleExclusive
;
2382 return LockTupleNoKeyExclusive
;
2386 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2388 * If no such struct, either return NULL or throw error depending on missing_ok
2391 ExecFindRowMark(EState
*estate
, Index rti
, bool missing_ok
)
2393 if (rti
> 0 && rti
<= estate
->es_range_table_size
&&
2394 estate
->es_rowmarks
!= NULL
)
2396 ExecRowMark
*erm
= estate
->es_rowmarks
[rti
- 1];
2402 elog(ERROR
, "failed to find ExecRowMark for rangetable index %u", rti
);
/*
 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
 *
 * Inputs are the underlying ExecRowMark struct and the targetlist of the
 * input plan node (not planstate node!).  We need the latter to find out
 * the column numbers of the resjunk columns.
 */
ExecAuxRowMark *
ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
{
	ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
	char		resname[32];

	aerm->rowmark = erm;

	/* Look up the resjunk columns associated with this rowmark */
	if (erm->markType != ROW_MARK_COPY)
	{
		/* need ctid for all methods other than COPY */
		snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
		aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
													   resname);
		if (!AttributeNumberIsValid(aerm->ctidAttNo))
			elog(ERROR, "could not find junk %s column", resname);
	}
	else
	{
		/* need wholerow if COPY */
		snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
		aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
														resname);
		if (!AttributeNumberIsValid(aerm->wholeAttNo))
			elog(ERROR, "could not find junk %s column", resname);
	}

	/* if child rel, need tableoid */
	if (erm->rti != erm->prti)
	{
		snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
		aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
													   resname);
		if (!AttributeNumberIsValid(aerm->toidAttNo))
			elog(ERROR, "could not find junk %s column", resname);
	}

	return aerm;
}
/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */

/*
 * Check the updated version of a tuple to see if we want to process it under
 * READ COMMITTED rules.
 *
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	inputslot - tuple for processing - this can be the slot from
 *		EvalPlanQualSlot() for this rel, for increased efficiency.
 *
 * This tests whether the tuple in inputslot still matches the relevant
 * quals.  For that result to be useful, typically the input tuple has to be
 * last row version (otherwise the result isn't particularly useful) and
 * locked (otherwise the result might be out of date).  That's typically
 * achieved by using table_tuple_lock() with the
 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EPQState *epqstate, Relation relation,
			 Index rti, TupleTableSlot *inputslot)
{
	TupleTableSlot *slot;
	TupleTableSlot *testslot;

	Assert(rti > 0);

	/*
	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
	 */
	EvalPlanQualBegin(epqstate);

	/*
	 * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
	 * an unnecessary copy.
	 */
	testslot = EvalPlanQualSlot(epqstate, relation, rti);
	if (testslot != inputslot)
		ExecCopySlot(testslot, inputslot);

	/*
	 * Mark that an EPQ tuple is available for this relation.  (If there is
	 * more than one result relation, the others remain marked as having no
	 * tuple available.)
	 */
	epqstate->relsubs_done[rti - 1] = false;
	epqstate->relsubs_blocked[rti - 1] = false;

	/*
	 * Run the EPQ query.  We assume it will return at most one tuple.
	 */
	slot = EvalPlanQualNext(epqstate);

	/*
	 * If we got a tuple, force the slot to materialize the tuple so that it
	 * is not dependent on any local state in the EPQ query (in particular,
	 * it's highly likely that the slot contains references to any pass-by-ref
	 * datums that may be present in copyTuple).  As with the next step, this
	 * is to guard against early re-use of the EPQ query.
	 */
	if (!TupIsNull(slot))
		ExecMaterializeSlot(slot);

	/*
	 * Clear out the test tuple, and mark that no tuple is available here.
	 * This is needed in case the EPQ state is re-used to test a tuple for a
	 * different target relation.
	 */
	ExecClearTuple(testslot);
	epqstate->relsubs_blocked[rti - 1] = true;

	return slot;
}
/*
 * EvalPlanQualInit -- initialize during creation of a plan state node
 * that might need to invoke EPQ processing.
 *
 * If the caller intends to use EvalPlanQual(), resultRelations should be
 * a list of RT indexes of potential target relations for EvalPlanQual(),
 * and we will arrange that the other listed relations don't return any
 * tuple during an EvalPlanQual() call.  Otherwise resultRelations
 * should be NIL.
 *
 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
 * with EvalPlanQualSetPlan.
 */
void
EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
				 Plan *subplan, List *auxrowmarks,
				 int epqParam, List *resultRelations)
{
	Index		rtsize = parentestate->es_range_table_size;

	/* initialize data not changing over EPQState's lifetime */
	epqstate->parentestate = parentestate;
	epqstate->epqParam = epqParam;
	epqstate->resultRelations = resultRelations;

	/*
	 * Allocate space to reference a slot for each potential rti - do so now
	 * rather than in EvalPlanQualBegin(), as done for other dynamically
	 * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
	 * that *may* need EPQ later, without forcing the overhead of
	 * EvalPlanQualBegin().
	 */
	epqstate->tuple_table = NIL;
	epqstate->relsubs_slot = (TupleTableSlot **)
		palloc0(rtsize * sizeof(TupleTableSlot *));

	/* ... and remember data that EvalPlanQualBegin will need */
	epqstate->plan = subplan;
	epqstate->arowMarks = auxrowmarks;

	/* ... and mark the EPQ state inactive */
	epqstate->origslot = NULL;
	epqstate->recheckestate = NULL;
	epqstate->recheckplanstate = NULL;
	epqstate->relsubs_rowmark = NULL;
	epqstate->relsubs_done = NULL;
	epqstate->relsubs_blocked = NULL;
}
2589 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2591 * We used to need this so that ModifyTable could deal with multiple subplans.
2592 * It could now be refactored out of existence.
2595 EvalPlanQualSetPlan(EPQState
*epqstate
, Plan
*subplan
, List
*auxrowmarks
)
2597 /* If we have a live EPQ query, shut it down */
2598 EvalPlanQualEnd(epqstate
);
2599 /* And set/change the plan pointer */
2600 epqstate
->plan
= subplan
;
2601 /* The rowmarks depend on the plan, too */
2602 epqstate
->arowMarks
= auxrowmarks
;
/*
 * Return, and create if necessary, a slot for an EPQ test tuple.
 *
 * Note this only requires EvalPlanQualInit() to have been called,
 * EvalPlanQualBegin() is not necessary.
 */
TupleTableSlot *
EvalPlanQualSlot(EPQState *epqstate,
				 Relation relation, Index rti)
{
	TupleTableSlot **slot;

	Assert(relation);
	Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
	slot = &epqstate->relsubs_slot[rti - 1];

	if (*slot == NULL)
	{
		MemoryContext oldcontext;

		/*
		 * Create the slot lazily, in the parent query's context so it
		 * survives across EPQ begin/end cycles.
		 */
		oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
		*slot = table_slot_create(relation, &epqstate->tuple_table);
		MemoryContextSwitchTo(oldcontext);
	}

	return *slot;
}
/*
 * Fetch the current row value for a non-locked relation, identified by rti,
 * that needs to be scanned by an EvalPlanQual operation.  origslot must have
 * been set to contain the current result row (top-level row) that we need to
 * recheck.  Returns true if a substitution tuple was found, false if not.
 */
bool
EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
{
	ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
	ExecRowMark *erm = earm->rowmark;
	Datum		datum;
	bool		isNull;

	Assert(earm != NULL);
	Assert(epqstate->origslot != NULL);

	/* only non-locking rowmarks are supported here */
	if (RowMarkRequiresRowShareLock(erm->markType))
		elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");

	/* if child rel, must check whether it produced this row */
	if (erm->rti != erm->prti)
	{
		Oid			tableoid;

		datum = ExecGetJunkAttribute(epqstate->origslot,
									 earm->toidAttNo,
									 &isNull);
		/* non-locked rels could be on the inside of outer joins */
		if (isNull)
			return false;

		tableoid = DatumGetObjectId(datum);

		Assert(OidIsValid(erm->relid));
		if (tableoid != erm->relid)
		{
			/* this child is inactive right now */
			return false;
		}
	}

	if (erm->markType == ROW_MARK_REFERENCE)
	{
		Assert(erm->relation != NULL);

		/* fetch the tuple's ctid */
		datum = ExecGetJunkAttribute(epqstate->origslot,
									 earm->ctidAttNo,
									 &isNull);
		/* non-locked rels could be on the inside of outer joins */
		if (isNull)
			return false;

		/* fetch requests on foreign tables must be passed to their FDW */
		if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		{
			FdwRoutine *fdwroutine;
			bool		updated = false;

			fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
			/* this should have been checked already, but let's be safe */
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(erm->relation))));

			fdwroutine->RefetchForeignRow(epqstate->recheckestate,
										  erm,
										  datum,
										  slot,
										  &updated);
			if (TupIsNull(slot))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

			/*
			 * Ideally we'd insist on updated == false here, but that assumes
			 * that FDWs can track that exactly, which they might not be able
			 * to.  So just ignore the flag.
			 */
		}
		else
		{
			/* ordinary table, fetch the tuple */
			if (!table_tuple_fetch_row_version(erm->relation,
											   (ItemPointer) DatumGetPointer(datum),
											   SnapshotAny, slot))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
		}
	}
	else
	{
		Assert(erm->markType == ROW_MARK_COPY);

		/* fetch the whole-row Var for the relation */
		datum = ExecGetJunkAttribute(epqstate->origslot,
									 earm->wholeAttNo,
									 &isNull);
		/* non-locked rels could be on the inside of outer joins */
		if (isNull)
			return false;

		ExecStoreHeapTupleDatum(datum, slot);
	}

	return true;
}
2744 * Fetch the next row (if any) from EvalPlanQual testing
2746 * (In practice, there should never be more than one row...)
2749 EvalPlanQualNext(EPQState
*epqstate
)
2751 MemoryContext oldcontext
;
2752 TupleTableSlot
*slot
;
2754 oldcontext
= MemoryContextSwitchTo(epqstate
->recheckestate
->es_query_cxt
);
2755 slot
= ExecProcNode(epqstate
->recheckplanstate
);
2756 MemoryContextSwitchTo(oldcontext
);
/*
 * Initialize or reset an EvalPlanQual state tree
 */
void
EvalPlanQualBegin(EPQState *epqstate)
{
	EState	   *parentestate = epqstate->parentestate;
	EState	   *recheckestate = epqstate->recheckestate;

	if (recheckestate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		Index		rtsize = parentestate->es_range_table_size;
		PlanState  *rcplanstate = epqstate->recheckplanstate;

		/*
		 * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
		 * the EPQ run will never attempt to fetch tuples from blocked target
		 * relations.
		 */
		memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
			   rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->paramExecTypes != NIL)
		{
			int			i;

			/*
			 * Force evaluation of any InitPlan outputs that could be needed
			 * by the subplan, just in case they got reset since
			 * EvalPlanQualStart (see comments therein).
			 */
			ExecSetParamPlanMulti(rcplanstate->plan->extParam,
								  GetPerTupleExprContext(parentestate));

			i = list_length(parentestate->es_plannedstmt->paramExecTypes);

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				recheckestate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				recheckestate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
											   epqstate->epqParam);
	}
}
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
{
	EState	   *parentestate = epqstate->parentestate;
	Index		rtsize = parentestate->es_range_table_size;
	EState	   *rcestate;
	MemoryContext oldcontext;
	ListCell   *l;

	epqstate->recheckestate = rcestate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);

	/* signal that this is an EState for executing EPQ */
	rcestate->es_epq_active = epqstate;

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, and external Param info.  They need their own
	 * copies of local state, including a tuple table, es_param_exec_vals,
	 * result-rel info, etc.
	 */
	rcestate->es_direction = ForwardScanDirection;
	rcestate->es_snapshot = parentestate->es_snapshot;
	rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	rcestate->es_range_table = parentestate->es_range_table;
	rcestate->es_range_table_size = parentestate->es_range_table_size;
	rcestate->es_relations = parentestate->es_relations;
	rcestate->es_rowmarks = parentestate->es_rowmarks;
	rcestate->es_rteperminfos = parentestate->es_rteperminfos;
	rcestate->es_plannedstmt = parentestate->es_plannedstmt;
	rcestate->es_junkFilter = parentestate->es_junkFilter;
	rcestate->es_output_cid = parentestate->es_output_cid;
	rcestate->es_queryEnv = parentestate->es_queryEnv;

	/*
	 * ResultRelInfos needed by subplans are initialized from scratch when the
	 * subplans themselves are initialized.
	 */
	rcestate->es_result_relations = NULL;
	/* es_trig_target_relations must NOT be copied */
	rcestate->es_top_eflags = parentestate->es_top_eflags;
	rcestate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	rcestate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		/*
		 * Force evaluation of any InitPlan outputs that could be needed by
		 * the subplan.  (With more complexity, maybe we could postpone this
		 * till the subplan actually demands them, but it doesn't seem worth
		 * the trouble; this is a corner case already, since usually the
		 * InitPlans would have been evaluated before reaching EvalPlanQual.)
		 *
		 * This will not touch output params of InitPlans that occur somewhere
		 * within the subplan tree, only those that are attached to the
		 * ModifyTable node or above it and are referenced within the subplan.
		 * That's OK though, because the planner would only attach such
		 * InitPlans to a lower-level SubqueryScan node, and EPQ execution
		 * will not descend into a SubqueryScan.
		 *
		 * The EState's per-output-tuple econtext is sufficiently short-lived
		 * for this, since it should get reset before there is any chance of
		 * doing EvalPlanQual again.
		 */
		ExecSetParamPlanMulti(planTree->extParam,
							  GetPerTupleExprContext(parentestate));

		/* now make the internal param workspace ... */
		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		rcestate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		/* ... and copy down all values, whether really needed or not */
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			rcestate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			rcestate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.  Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(rcestate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, rcestate, 0);
		rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Build an RTI indexed array of rowmarks, so that
	 * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
	 * rowmarks.
	 */
	epqstate->relsubs_rowmark = (ExecAuxRowMark **)
		palloc0(rtsize * sizeof(ExecAuxRowMark *));
	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);

		epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
	}

	/*
	 * Initialize per-relation EPQ tuple states.  Result relations, if any,
	 * get marked as blocked; others as not-fetched.
	 */
	epqstate->relsubs_done = palloc_array(bool, rtsize);
	epqstate->relsubs_blocked = palloc0_array(bool, rtsize);

	foreach(l, epqstate->resultRelations)
	{
		int			rtindex = lfirst_int(l);

		Assert(rtindex > 0 && rtindex <= rtsize);
		epqstate->relsubs_blocked[rtindex - 1] = true;
	}

	/* start with relsubs_done[] matching relsubs_blocked[] */
	memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
		   rtsize * sizeof(bool));

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * result and trigger target relations that got opened, since those are not
 * shared.  (There probably shouldn't be any of the latter, but just in
 * case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->recheckestate;
	Index		rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = epqstate->parentestate->es_range_table_size;

	/*
	 * We may have a tuple table, even if EPQ wasn't started, because we allow
	 * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
	 */
	if (epqstate->tuple_table != NIL)
	{
		memset(epqstate->relsubs_slot, 0,
			   rtsize * sizeof(TupleTableSlot *));
		ExecResetTupleTable(epqstate->tuple_table, true);
		epqstate->tuple_table = NIL;
	}

	/* EPQ wasn't started, nothing further to do */
	if (estate == NULL)
		return;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndNode(epqstate->recheckplanstate);

	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table, some node may have used it */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* Close any result and trigger target relations attached to this EState */
	ExecCloseResultRelations(estate);

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->origslot = NULL;
	epqstate->recheckestate = NULL;
	epqstate->recheckplanstate = NULL;
	epqstate->relsubs_rowmark = NULL;
	epqstate->relsubs_done = NULL;
	epqstate->relsubs_blocked = NULL;
}