/*-------------------------------------------------------------------------
 *
 * nodeCtescan.c
 *	  routines to handle CteScan nodes.
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeCtescan.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "executor/executor.h"
#include "executor/nodeCtescan.h"
#include "miscadmin.h"
22 static TupleTableSlot
*CteScanNext(CteScanState
*node
);
24 /* ----------------------------------------------------------------
27 * This is a workhorse for ExecCteScan
28 * ----------------------------------------------------------------
30 static TupleTableSlot
*
31 CteScanNext(CteScanState
*node
)
36 Tuplestorestate
*tuplestorestate
;
41 * get state info from node
43 estate
= node
->ss
.ps
.state
;
44 dir
= estate
->es_direction
;
45 forward
= ScanDirectionIsForward(dir
);
46 tuplestorestate
= node
->leader
->cte_table
;
47 tuplestore_select_read_pointer(tuplestorestate
, node
->readptr
);
48 slot
= node
->ss
.ss_ScanTupleSlot
;
51 * If we are not at the end of the tuplestore, or are going backwards, try
52 * to fetch a tuple from tuplestore.
54 eof_tuplestore
= tuplestore_ateof(tuplestorestate
);
56 if (!forward
&& eof_tuplestore
)
58 if (!node
->leader
->eof_cte
)
61 * When reversing direction at tuplestore EOF, the first
62 * gettupleslot call will fetch the last-added tuple; but we want
63 * to return the one before that, if possible. So do an extra
66 if (!tuplestore_advance(tuplestorestate
, forward
))
67 return NULL
; /* the tuplestore must be empty */
69 eof_tuplestore
= false;
73 * If we can fetch another tuple from the tuplestore, return it.
75 * Note: we have to use copy=true in the tuplestore_gettupleslot call,
76 * because we are sharing the tuplestore with other nodes that might write
77 * into the tuplestore before we get called again.
81 if (tuplestore_gettupleslot(tuplestorestate
, forward
, true, slot
))
84 eof_tuplestore
= true;
88 * If necessary, try to fetch another row from the CTE query.
90 * Note: the eof_cte state variable exists to short-circuit further calls
91 * of the CTE plan. It's not optional, unfortunately, because some plan
92 * node types are not robust about being called again when they've already
95 if (eof_tuplestore
&& !node
->leader
->eof_cte
)
97 TupleTableSlot
*cteslot
;
100 * We can only get here with forward==true, so no need to worry about
101 * which direction the subplan will go.
103 cteslot
= ExecProcNode(node
->cteplanstate
);
104 if (TupIsNull(cteslot
))
106 node
->leader
->eof_cte
= true;
111 * There are corner cases where the subplan could change which
112 * tuplestore read pointer is active, so be sure to reselect ours
113 * before storing the tuple we got.
115 tuplestore_select_read_pointer(tuplestorestate
, node
->readptr
);
118 * Append a copy of the returned tuple to tuplestore. NOTE: because
119 * our read pointer is certainly in EOF state, its read position will
120 * move forward over the added tuple. This is what we want. Also,
121 * any other readers will *not* move past the new tuple, which is what
124 tuplestore_puttupleslot(tuplestorestate
, cteslot
);
127 * We MUST copy the CTE query's output tuple into our own slot. This
128 * is because other CteScan nodes might advance the CTE query before
129 * we are called again, and our output tuple must stay stable over
132 return ExecCopySlot(slot
, cteslot
);
138 return ExecClearTuple(slot
);
142 * CteScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
145 CteScanRecheck(CteScanState
*node
, TupleTableSlot
*slot
)
147 /* nothing to check */
151 /* ----------------------------------------------------------------
154 * Scans the CTE sequentially and returns the next qualifying tuple.
155 * We call the ExecScan() routine and pass it the appropriate
156 * access method functions.
157 * ----------------------------------------------------------------
159 static TupleTableSlot
*
160 ExecCteScan(PlanState
*pstate
)
162 CteScanState
*node
= castNode(CteScanState
, pstate
);
164 return ExecScan(&node
->ss
,
165 (ExecScanAccessMtd
) CteScanNext
,
166 (ExecScanRecheckMtd
) CteScanRecheck
);
170 /* ----------------------------------------------------------------
172 * ----------------------------------------------------------------
175 ExecInitCteScan(CteScan
*node
, EState
*estate
, int eflags
)
177 CteScanState
*scanstate
;
178 ParamExecData
*prmdata
;
180 /* check for unsupported flags */
181 Assert(!(eflags
& EXEC_FLAG_MARK
));
184 * For the moment we have to force the tuplestore to allow REWIND, because
185 * we might be asked to rescan the CTE even though upper levels didn't
186 * tell us to be prepared to do it efficiently. Annoying, since this
187 * prevents truncation of the tuplestore. XXX FIXME
189 * Note: if we are in an EPQ recheck plan tree, it's likely that no access
190 * to the tuplestore is needed at all, making this even more annoying.
191 * It's not worth improving that as long as all the read pointers would
192 * have REWIND anyway, but if we ever improve this logic then that aspect
193 * should be considered too.
195 eflags
|= EXEC_FLAG_REWIND
;
198 * CteScan should not have any children.
200 Assert(outerPlan(node
) == NULL
);
201 Assert(innerPlan(node
) == NULL
);
204 * create new CteScanState for node
206 scanstate
= makeNode(CteScanState
);
207 scanstate
->ss
.ps
.plan
= (Plan
*) node
;
208 scanstate
->ss
.ps
.state
= estate
;
209 scanstate
->ss
.ps
.ExecProcNode
= ExecCteScan
;
210 scanstate
->eflags
= eflags
;
211 scanstate
->cte_table
= NULL
;
212 scanstate
->eof_cte
= false;
215 * Find the already-initialized plan for the CTE query.
217 scanstate
->cteplanstate
= (PlanState
*) list_nth(estate
->es_subplanstates
,
218 node
->ctePlanId
- 1);
221 * The Param slot associated with the CTE query is used to hold a pointer
222 * to the CteState of the first CteScan node that initializes for this
223 * CTE. This node will be the one that holds the shared state for all the
224 * CTEs, particularly the shared tuplestore.
226 prmdata
= &(estate
->es_param_exec_vals
[node
->cteParam
]);
227 Assert(prmdata
->execPlan
== NULL
);
228 Assert(!prmdata
->isnull
);
229 scanstate
->leader
= castNode(CteScanState
, DatumGetPointer(prmdata
->value
));
230 if (scanstate
->leader
== NULL
)
232 /* I am the leader */
233 prmdata
->value
= PointerGetDatum(scanstate
);
234 scanstate
->leader
= scanstate
;
235 scanstate
->cte_table
= tuplestore_begin_heap(true, false, work_mem
);
236 tuplestore_set_eflags(scanstate
->cte_table
, scanstate
->eflags
);
237 scanstate
->readptr
= 0;
242 /* Create my own read pointer, and ensure it is at start */
244 tuplestore_alloc_read_pointer(scanstate
->leader
->cte_table
,
246 tuplestore_select_read_pointer(scanstate
->leader
->cte_table
,
248 tuplestore_rescan(scanstate
->leader
->cte_table
);
252 * Miscellaneous initialization
254 * create expression context for node
256 ExecAssignExprContext(estate
, &scanstate
->ss
.ps
);
259 * The scan tuple type (ie, the rowtype we expect to find in the work
260 * table) is the same as the result rowtype of the CTE query.
262 ExecInitScanTupleSlot(estate
, &scanstate
->ss
,
263 ExecGetResultType(scanstate
->cteplanstate
),
264 &TTSOpsMinimalTuple
);
267 * Initialize result type and projection.
269 ExecInitResultTypeTL(&scanstate
->ss
.ps
);
270 ExecAssignScanProjectionInfo(&scanstate
->ss
);
273 * initialize child expressions
275 scanstate
->ss
.ps
.qual
=
276 ExecInitQual(node
->scan
.plan
.qual
, (PlanState
*) scanstate
);
281 /* ----------------------------------------------------------------
284 * frees any storage allocated through C routines.
285 * ----------------------------------------------------------------
288 ExecEndCteScan(CteScanState
*node
)
291 * If I am the leader, free the tuplestore.
293 if (node
->leader
== node
)
295 tuplestore_end(node
->cte_table
);
296 node
->cte_table
= NULL
;
300 /* ----------------------------------------------------------------
303 * Rescans the relation.
304 * ----------------------------------------------------------------
307 ExecReScanCteScan(CteScanState
*node
)
309 Tuplestorestate
*tuplestorestate
= node
->leader
->cte_table
;
311 if (node
->ss
.ps
.ps_ResultTupleSlot
)
312 ExecClearTuple(node
->ss
.ps
.ps_ResultTupleSlot
);
314 ExecScanReScan(&node
->ss
);
317 * Clear the tuplestore if a new scan of the underlying CTE is required.
318 * This implicitly resets all the tuplestore's read pointers. Note that
319 * multiple CTE nodes might redundantly clear the tuplestore; that's OK,
320 * and not unduly expensive. We'll stop taking this path as soon as
321 * somebody has attempted to read something from the underlying CTE
322 * (thereby causing its chgParam to be cleared).
324 if (node
->leader
->cteplanstate
->chgParam
!= NULL
)
326 tuplestore_clear(tuplestorestate
);
327 node
->leader
->eof_cte
= false;
332 * Else, just rewind my own pointer. Either the underlying CTE
333 * doesn't need a rescan (and we can re-read what's in the tuplestore
334 * now), or somebody else already took care of it.
336 tuplestore_select_read_pointer(tuplestorestate
, node
->readptr
);
337 tuplestore_rescan(tuplestorestate
);