/*-------------------------------------------------------------------------
 *
 * costsize.c
 *    Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *  seq_page_cost         Cost of a sequential page fetch
 *  random_page_cost      Cost of a non-sequential page fetch
 *  cpu_tuple_cost        Cost of typical CPU time to process a tuple
 *  cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
 *  cpu_operator_cost     Cost of CPU time to execute an operator or function
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *      total_cost: total estimated cost to fetch all tuples
 *      startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *      actual_cost = startup_cost +
 *          (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
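/*
 * Illustrative example (not part of the original file): under the
 * interpolation rule above, a path with startup_cost = 10, total_cost = 110
 * and parent->rows = 1000 would be charged about
 *      10 + (110 - 10) * 100 / 1000 = 20
 * for fetching only its first 100 tuples, which is how LIMIT- and
 * EXISTS()-style callers discount a partial fetch.
 */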
#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/placeholder.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/tuplesort.h"
#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Some Paths return less than the nominal number of rows of their parent
 * relations; join nodes need to do this to get the correct input count:
 */
#define PATH_ROWS(path) \
    (IsA(path, UniquePath) ? \
     ((UniquePath *) (path))->rows : \
     (path)->parent->rows)
double      seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double      random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double      cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double      cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double      cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

int         effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost        disable_cost = 1.0e10;

bool        enable_seqscan = true;
bool        enable_indexscan = true;
bool        enable_bitmapscan = true;
bool        enable_tidscan = true;
bool        enable_sort = true;
bool        enable_hashagg = true;
bool        enable_nestloop = true;
bool        enable_mergejoin = true;
bool        enable_hashjoin = true;
typedef struct
{
    PlannerInfo *root;
    QualCost    total;
} cost_qual_eval_context;

static MergeScanSelCache *cached_scansel(PlannerInfo *root,
                RestrictInfo *rinfo,
                PathKey *pathkey);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
                 SpecialJoinInfo *sjinfo,
                 Selectivity *outer_match_frac,
                 Selectivity *match_count,
                 bool *indexed_join_quals);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
                 List *quals);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
/*
 * clamp_row_est
 *      Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
    /*
     * Force estimate to be at least one row, to make explain output look
     * better and to avoid possible divide-by-zero when interpolating costs.
     * Make it an integer, too.
     */
    if (nrows <= 1.0)
        nrows = 1.0;
    else
        nrows = rint(nrows);

    return nrows;
}
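/*
 * Illustrative example (not part of the original file): clamp_row_est(0.3)
 * returns 1.0 and clamp_row_est(17.6) returns 18.0; estimates come out
 * integral and never less than one row.
 */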
/*
 * cost_seqscan
 *    Determines and returns the cost of scanning a relation sequentially.
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    if (!enable_seqscan)
        startup_cost += disable_cost;

    /* disk costs */
    run_cost += seq_page_cost * baserel->pages;

    /* CPU costs */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
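/*
 * Illustrative example (not part of the original file): with the default
 * settings seq_page_cost = 1.0 and cpu_tuple_cost = 0.01, a 100-page relation
 * holding 10000 tuples and no restriction clauses is costed at
 *      100 * 1.0 + 10000 * 0.01 = 200
 * total, with zero startup cost.
 */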
/*
 * cost_index
 *    Determines and returns the cost of scanning a relation using an index.
 *
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'outer_rel' is the outer relation when we are considering using the index
 *      scan as the inside of a nestloop join (hence, some of the indexQuals
 *      are join clauses, and we should expect repeated scans of the index);
 *      NULL for a plain index scan
 *
 * cost_index() takes an IndexPath not just a Path, because it sets a few
 * additional fields of the IndexPath besides startup_cost and total_cost.
 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 *
 * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
 * it was a list of bare clause expressions.
 */
void
cost_index(IndexPath *path, PlannerInfo *root,
           IndexOptInfo *index,
           List *indexQuals,
           RelOptInfo *outer_rel)
{
    RelOptInfo *baserel = index->rel;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexStartupCost;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    double      indexCorrelation,
                csquared;
    Cost        min_IO_cost,
                max_IO_cost;
    Cost        cpu_per_tuple;
    double      tuples_fetched;
    double      pages_fetched;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo) &&
           IsA(index, IndexOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    if (!enable_indexscan)
        startup_cost += disable_cost;

    /*
     * Call index-access-method-specific code to estimate the processing cost
     * for scanning the index, as well as the selectivity of the index (ie,
     * the fraction of main-table tuples we will have to retrieve) and its
     * correlation to the main-table tuple order.
     */
    OidFunctionCall8(index->amcostestimate,
                     PointerGetDatum(root),
                     PointerGetDatum(index),
                     PointerGetDatum(indexQuals),
                     PointerGetDatum(outer_rel),
                     PointerGetDatum(&indexStartupCost),
                     PointerGetDatum(&indexTotalCost),
                     PointerGetDatum(&indexSelectivity),
                     PointerGetDatum(&indexCorrelation));

    /*
     * Save amcostestimate's results for possible use in bitmap scan planning.
     * We don't bother to save indexStartupCost or indexCorrelation, because a
     * bitmap scan doesn't care about either.
     */
    path->indextotalcost = indexTotalCost;
    path->indexselectivity = indexSelectivity;

    /* all costs for touching index itself included here */
    startup_cost += indexStartupCost;
    run_cost += indexTotalCost - indexStartupCost;

    /* estimate number of main-table tuples fetched */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    /*
     * Estimate number of main-table pages fetched, and compute I/O cost.
     *
     * When the index ordering is uncorrelated with the table ordering,
     * we use an approximation proposed by Mackert and Lohman (see
     * index_pages_fetched() for details) to compute the number of pages
     * fetched, and then charge random_page_cost per page fetched.
     *
     * When the index ordering is exactly correlated with the table ordering
     * (just after a CLUSTER, for example), the number of pages fetched should
     * be exactly selectivity * table_size.  What's more, all but the first
     * will be sequential fetches, not the random fetches that occur in the
     * uncorrelated case.  So if the number of pages is more than 1, we
     * ought to charge
     *      random_page_cost + (pages_fetched - 1) * seq_page_cost
     * For partially-correlated indexes, we ought to charge somewhere between
     * these two estimates.  We currently interpolate linearly between the
     * estimates based on the correlation squared (XXX is that appropriate?).
     */
    if (outer_rel != NULL && outer_rel->rows > 1)
    {
        /*
         * For repeated indexscans, the appropriate estimate for the
         * uncorrelated case is to scale up the number of tuples fetched in
         * the Mackert and Lohman formula by the number of scans, so that we
         * estimate the number of pages fetched by all the scans; then
         * pro-rate the costs for one scan.  In this case we assume all the
         * fetches are random accesses.
         */
        double      num_scans = outer_rel->rows;

        pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        max_IO_cost = (pages_fetched * random_page_cost) / num_scans;

        /*
         * In the perfectly correlated case, the number of pages touched by
         * each scan is selectivity * table_size, and we can use the Mackert
         * and Lohman formula at the page level to estimate how much work is
         * saved by caching across scans.  We still assume all the fetches are
         * random, though, which is an overestimate that's hard to correct for
         * without double-counting the cache effects.  (But in most cases
         * where such a plan is actually interesting, only one page would get
         * fetched per scan anyway, so it shouldn't matter much.)
         */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        pages_fetched = index_pages_fetched(pages_fetched * num_scans,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
    }
    else
    {
        /*
         * Normal case: apply the Mackert and Lohman formula, and then
         * interpolate between that and the correlation-derived result.
         */
        pages_fetched = index_pages_fetched(tuples_fetched,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
        max_IO_cost = pages_fetched * random_page_cost;

        /* min_IO_cost is for the perfectly correlated case (csquared=1) */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
        min_IO_cost = random_page_cost;
        if (pages_fetched > 1)
            min_IO_cost += (pages_fetched - 1) * seq_page_cost;
    }

    /*
     * Now interpolate based on estimated index order correlation to get total
     * disk I/O cost for main table accesses.
     */
    csquared = indexCorrelation * indexCorrelation;

    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
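    /*
     * Illustrative example (not part of the original file): if max_IO_cost =
     * 400, min_IO_cost = 40 and indexCorrelation = 0.5, then csquared = 0.25
     * and the charge is 400 + 0.25 * (40 - 400) = 310; a perfectly clustered
     * index (correlation 1.0) would instead be charged the full min_IO_cost.
     */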
    /*
     * Estimate CPU costs per tuple.
     *
     * Normally the indexquals will be removed from the list of restriction
     * clauses that we have to evaluate as qpquals, so we should subtract
     * their costs from baserestrictcost.  But if we are doing a join then
     * some of the indexquals are join clauses and shouldn't be subtracted.
     * Rather than work out exactly how much to subtract, we don't subtract
     * anything.
     */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

    if (outer_rel == NULL)
    {
        QualCost    index_qual_cost;

        cost_qual_eval(&index_qual_cost, indexQuals, root);
        /* any startup cost still has to be paid ... */
        cpu_per_tuple -= index_qual_cost.per_tuple;
    }

    run_cost += cpu_per_tuple * tuples_fetched;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = startup_cost + run_cost;
}
/*
 * index_pages_fetched
 *    Estimate the number of pages actually fetched after accounting for
 *    cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *  PF =
 *      min(2TNs/(2T+Ns), T)            when T <= b
 *      2TNs/(2T+Ns)                    when T > b and Ns <= 2Tb/(2T-b)
 *      b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
 * where
 *      T = # pages in table
 *      N = # tuples in table
 *      s = selectivity = fraction of table to be scanned
 *      b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by query_planner.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
                    double index_pages, PlannerInfo *root)
{
    double      pages_fetched;
    double      total_pages;
    double      T,
                b;

    /* T is # pages in table, but don't allow it to be zero */
    T = (pages > 1) ? (double) pages : 1.0;

    /* Compute number of pages assumed to be competing for cache space */
    total_pages = root->total_table_pages + index_pages;
    total_pages = Max(total_pages, 1.0);
    Assert(T <= total_pages);

    /* b is pro-rated share of effective_cache_size */
    b = (double) effective_cache_size * T / total_pages;

    /* force it positive and integral */
    if (b <= 1.0)
        b = 1.0;
    else
        b = ceil(b);

    /* This part is the Mackert and Lohman formula */
    if (T <= b)
    {
        pages_fetched =
            (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)
            pages_fetched = T;
        else
            pages_fetched = ceil(pages_fetched);
    }
    else
    {
        double      lim;

        lim = (2.0 * T * b) / (2.0 * T - b);
        if (tuples_fetched <= lim)
        {
            pages_fetched =
                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        }
        else
        {
            pages_fetched =
                b + (tuples_fetched - lim) * (T - b) / T;
        }
        pages_fetched = ceil(pages_fetched);
    }
    return pages_fetched;
}
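/*
 * Illustrative example (not part of the original file): for T = 1000 pages,
 * b >= T (no re-reads) and Ns = 500 tuples fetched, the formula above gives
 *      2*1000*500 / (2*1000 + 500) = 400
 * so roughly 400 distinct pages are expected to be touched rather than the
 * naive 500, because some fetches land on pages that were already read.
 */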
/*
 * get_indexpath_pages
 *      Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
    double      result = 0;
    ListCell   *l;

    if (IsA(bitmapqual, BitmapAndPath))
    {
        BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

        foreach(l, apath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, BitmapOrPath))
    {
        BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

        foreach(l, opath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, IndexPath))
    {
        IndexPath  *ipath = (IndexPath *) bitmapqual;

        result = (double) ipath->indexinfo->pages;
    }
    else
        elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

    return result;
}
/*
 * cost_bitmap_heap_scan
 *    Determines and returns the cost of scanning a relation using a bitmap
 *    index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'outer_rel' is the outer relation when we are considering using the bitmap
 *      scan as the inside of a nestloop join (hence, some of the indexQuals
 *      are join clauses, and we should expect repeated scans of the table);
 *      NULL for a plain bitmap scan
 *
 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
 * should have been costed accordingly.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                      Path *bitmapqual, RelOptInfo *outer_rel)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    Cost        cpu_per_tuple;
    Cost        cost_per_page;
    double      tuples_fetched;
    double      pages_fetched;
    double      T;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    if (!enable_bitmapscan)
        startup_cost += disable_cost;

    /*
     * Fetch total cost of obtaining the bitmap, as well as its total
     * selectivity.
     */
    cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);

    startup_cost += indexTotalCost;

    /*
     * Estimate number of main-table pages fetched.
     */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

    if (outer_rel != NULL && outer_rel->rows > 1)
    {
        /*
         * For repeated bitmap scans, scale up the number of tuples fetched in
         * the Mackert and Lohman formula by the number of scans, so that we
         * estimate the number of pages fetched by all the scans.  Then
         * pro-rate for one scan.
         */
        double      num_scans = outer_rel->rows;

        pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
                                            baserel->pages,
                                            get_indexpath_pages(bitmapqual),
                                            root);
        pages_fetched /= num_scans;
    }
    else
    {
        /*
         * For a single scan, the number of heap pages that need to be fetched
         * is the same as the Mackert and Lohman formula for the case T <= b
         * (ie, no re-reads needed).
         */
        pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
    }
    if (pages_fetched >= T)
        pages_fetched = T;
    else
        pages_fetched = ceil(pages_fetched);

    /*
     * For small numbers of pages we should charge random_page_cost apiece,
     * while if nearly all the table's pages are being read, it's more
     * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
     * too.  For lack of a better idea, interpolate like this to determine the
     * cost per page.
     */
    if (pages_fetched >= 2.0)
        cost_per_page = random_page_cost -
            (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
    else
        cost_per_page = random_page_cost;

    run_cost += pages_fetched * cost_per_page;
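    /*
     * Illustrative example (not part of the original file): with the defaults
     * random_page_cost = 4.0 and seq_page_cost = 1.0, fetching 25 of a
     * table's 100 pages gives sqrt(25/100) = 0.5, so each page is charged
     *      4.0 - (4.0 - 1.0) * 0.5 = 2.5
     * i.e. halfway between a random and a sequential fetch.
     */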
    /*
     * Estimate CPU costs per tuple.
     *
     * Often the indexquals don't need to be rechecked at each tuple ... but
     * not always, especially not if there are enough tuples involved that the
     * bitmaps become lossy.  For the moment, just assume they will be
     * rechecked always.
     */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

    run_cost += cpu_per_tuple * tuples_fetched;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_bitmap_tree_node
 *      Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
    if (IsA(path, IndexPath))
    {
        *cost = ((IndexPath *) path)->indextotalcost;
        *selec = ((IndexPath *) path)->indexselectivity;

        /*
         * Charge a small amount per retrieved tuple to reflect the costs of
         * manipulating the bitmap.  This is mostly to make sure that a bitmap
         * scan doesn't look to be the same cost as an indexscan to retrieve a
         * single tuple.
         */
        *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
    }
    else if (IsA(path, BitmapAndPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapAndPath *) path)->bitmapselectivity;
    }
    else if (IsA(path, BitmapOrPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapOrPath *) path)->bitmapselectivity;
    }
    else
    {
        elog(ERROR, "unrecognized node type: %d", nodeTag(path));
        *cost = *selec = 0;     /* keep compiler quiet */
    }
}
/*
 * cost_bitmap_and_node
 *      Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
    Cost        totalCost;
    Selectivity selec;
    ListCell   *l;

    /*
     * We estimate AND selectivity on the assumption that the inputs are
     * independent.  This is probably often wrong, but we don't have the info
     * to do better.
     *
     * The runtime cost of the BitmapAnd itself is estimated at 100x
     * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
     * definitely too simplistic?
     */
    totalCost = 0.0;
    selec = 1.0;
    foreach(l, path->bitmapquals)
    {
        Path       *subpath = (Path *) lfirst(l);
        Cost        subCost;
        Selectivity subselec;

        cost_bitmap_tree_node(subpath, &subCost, &subselec);

        selec *= subselec;

        totalCost += subCost;
        if (l != list_head(path->bitmapquals))
            totalCost += 100.0 * cpu_operator_cost;
    }
    path->bitmapselectivity = selec;
    path->path.startup_cost = totalCost;
    path->path.total_cost = totalCost;
}
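/*
 * Illustrative example (not part of the original file): ANDing two bitmap
 * inputs with selectivities 0.1 and 0.2 yields an assumed-independent
 * selectivity of 0.1 * 0.2 = 0.02, plus one tbm_intersect charge of
 * 100 * cpu_operator_cost (0.25 with the default setting of 0.0025) on top
 * of the two input costs.
 */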
/*
 * cost_bitmap_or_node
 *      Estimate the cost of a BitmapOr node
 *
 * See comments for cost_bitmap_and_node.
 */
void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
    Cost        totalCost;
    Selectivity selec;
    ListCell   *l;

    /*
     * We estimate OR selectivity on the assumption that the inputs are
     * non-overlapping, since that's often the case in "x IN (list)" type
     * situations.  Of course, we clamp to 1.0 at the end.
     *
     * The runtime cost of the BitmapOr itself is estimated at 100x
     * cpu_operator_cost for each tbm_union needed.  Probably too small,
     * definitely too simplistic?  We are aware that the tbm_unions are
     * optimized out when the inputs are BitmapIndexScans.
     */
    totalCost = 0.0;
    selec = 0.0;
    foreach(l, path->bitmapquals)
    {
        Path       *subpath = (Path *) lfirst(l);
        Cost        subCost;
        Selectivity subselec;

        cost_bitmap_tree_node(subpath, &subCost, &subselec);

        selec += subselec;

        totalCost += subCost;
        if (l != list_head(path->bitmapquals) &&
            !IsA(subpath, IndexPath))
            totalCost += 100.0 * cpu_operator_cost;
    }
    path->bitmapselectivity = Min(selec, 1.0);
    path->path.startup_cost = totalCost;
    path->path.total_cost = totalCost;
}
/*
 * cost_tidscan
 *    Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel, List *tidquals)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    bool        isCurrentOf = false;
    Cost        cpu_per_tuple;
    QualCost    tid_qual_cost;
    int         ntuples;
    ListCell   *l;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Count how many tuples we expect to retrieve */
    ntuples = 0;
    foreach(l, tidquals)
    {
        if (IsA(lfirst(l), ScalarArrayOpExpr))
        {
            /* Each element of the array yields 1 tuple */
            ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
            Node       *arraynode = (Node *) lsecond(saop->args);

            ntuples += estimate_array_length(arraynode);
        }
        else if (IsA(lfirst(l), CurrentOfExpr))
        {
            /* CURRENT OF yields 1 tuple */
            isCurrentOf = true;
            ntuples++;
        }
        else
        {
            /* It's just CTID = something, count 1 tuple */
            ntuples++;
        }
    }

    /*
     * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
     * understands how to do it correctly.  Therefore, honor enable_tidscan
     * only when CURRENT OF isn't present.  Also note that cost_qual_eval
     * counts a CurrentOfExpr as having startup cost disable_cost, which we
     * subtract off here; that's to prevent other plan types such as seqscan
     * from winning.
     */
    if (isCurrentOf)
    {
        Assert(baserel->baserestrictcost.startup >= disable_cost);
        startup_cost -= disable_cost;
    }
    else if (!enable_tidscan)
        startup_cost += disable_cost;

    /*
     * The TID qual expressions will be computed once, any other baserestrict
     * quals once per retrieved tuple.
     */
    cost_qual_eval(&tid_qual_cost, tidquals, root);

    /* disk costs --- assume each tuple on a different page */
    run_cost += random_page_cost * ntuples;

    /* CPU costs */
    startup_cost += baserel->baserestrictcost.startup +
        tid_qual_cost.per_tuple;
    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
        tid_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * ntuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_subqueryscan
 *    Determines and returns the cost of scanning a subquery RTE.
 */
void
cost_subqueryscan(Path *path, RelOptInfo *baserel)
{
    Cost        startup_cost;
    Cost        run_cost;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations that are subqueries */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_SUBQUERY);

    /*
     * Cost of path is cost of evaluating the subplan, plus cost of evaluating
     * any restriction clauses that will be attached to the SubqueryScan node,
     * plus cpu_tuple_cost to account for selection and projection overhead.
     */
    path->startup_cost = baserel->subplan->startup_cost;
    path->total_cost = baserel->subplan->total_cost;

    startup_cost = baserel->baserestrictcost.startup;
    cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
    run_cost = cpu_per_tuple * baserel->tuples;

    path->startup_cost += startup_cost;
    path->total_cost += startup_cost + run_cost;
}
/*
 * cost_functionscan
 *    Determines and returns the cost of scanning a function RTE.
 */
void
cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;
    RangeTblEntry *rte;
    QualCost    exprcost;

    /* Should only be applied to base relations that are functions */
    Assert(baserel->relid > 0);
    rte = planner_rt_fetch(baserel->relid, root);
    Assert(rte->rtekind == RTE_FUNCTION);

    /* Estimate costs of executing the function expression */
    cost_qual_eval_node(&exprcost, rte->funcexpr, root);

    startup_cost += exprcost.startup;
    cpu_per_tuple = exprcost.per_tuple;

    /* Add scanning CPU costs */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_valuesscan
 *    Determines and returns the cost of scanning a VALUES RTE.
 */
void
cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations that are values lists */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_VALUES);

    /*
     * For now, estimate list evaluation cost at one operator eval per list
     * (probably pretty bogus, but is it worth being smarter?)
     */
    cpu_per_tuple = cpu_operator_cost;

    /* Add scanning CPU costs */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_ctescan
 *    Determines and returns the cost of scanning a CTE RTE.
 *
 * Note: this is used for both self-reference and regular CTEs; the
 * possible cost differences are below the threshold of what we could
 * estimate accurately anyway.  Note that the costs of evaluating the
 * referenced CTE query are added into the final plan as initplan costs,
 * and should NOT be counted here.
 */
void
cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations that are CTEs */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_CTE);

    /* Charge one CPU tuple cost per row for tuplestore manipulation */
    cpu_per_tuple = cpu_tuple_cost;

    /* Add scanning CPU costs */
    startup_cost += baserel->baserestrictcost.startup;
    cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_recursive_union
 *    Determines and returns the cost of performing a recursive union,
 *    and also the estimated output size.
 *
 * We are given Plans for the nonrecursive and recursive terms.
 *
 * Note that the arguments and output are Plans, not Paths as in most of
 * the rest of this module.  That's because we don't bother setting up a
 * Path representation for recursive union --- we have only one way to do it.
 */
void
cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
{
    Cost        startup_cost;
    Cost        total_cost;
    double      total_rows;

    /* We probably have decent estimates for the non-recursive term */
    startup_cost = nrterm->startup_cost;
    total_cost = nrterm->total_cost;
    total_rows = nrterm->plan_rows;

    /*
     * We arbitrarily assume that about 10 recursive iterations will be
     * needed, and that we've managed to get a good fix on the cost and output
     * size of each one of them.  These are mighty shaky assumptions but it's
     * hard to see how to do better.
     */
    total_cost += 10 * rterm->total_cost;
    total_rows += 10 * rterm->plan_rows;

    /*
     * Also charge cpu_tuple_cost per row to account for the costs of
     * manipulating the tuplestores.  (We don't worry about possible
     * spill-to-disk costs.)
     */
    total_cost += cpu_tuple_cost * total_rows;

    runion->startup_cost = startup_cost;
    runion->total_cost = total_cost;
    runion->plan_rows = total_rows;
    runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
}
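/*
 * Illustrative example (not part of the original file): if the non-recursive
 * term costs 10 and returns 100 rows, while one recursive iteration costs 5
 * and returns 20 rows, the union is costed at roughly
 *      10 + 10 * 5 + 0.01 * (100 + 10 * 20) = 63
 * with the default cpu_tuple_cost of 0.01, and its output is estimated at
 * 300 rows.
 */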
/*
 * cost_sort
 *    Determines and returns the cost of sorting a relation, including
 *    the cost of reading the input data.
 *
 * If the total volume of data to sort is less than work_mem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds work_mem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
 * number of initial runs formed and M is the merge order used by tuplesort.c.
 * Since the average initial run should be about twice work_mem, we have
 *      disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
 *      cpu = comparison_cost * t * log2(t)
 *
 * If the sort is bounded (i.e., only the first k result tuples are needed)
 * and k tuples can fit into work_mem, we use a heap method that keeps only
 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
 *
 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
void
cost_sort(Path *path, PlannerInfo *root,
          List *pathkeys, Cost input_cost, double tuples, int width,
          double limit_tuples)
{
    Cost        startup_cost = input_cost;
    Cost        run_cost = 0;
    double      input_bytes = relation_byte_size(tuples, width);
    double      output_bytes;
    double      output_tuples;
    long        work_mem_bytes = work_mem * 1024L;

    if (!enable_sort)
        startup_cost += disable_cost;

    /*
     * We want to be sure the cost of a sort is never estimated as zero, even
     * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
     */
    if (tuples < 2.0)
        tuples = 2.0;

    /* Do we have a useful LIMIT? */
    if (limit_tuples > 0 && limit_tuples < tuples)
    {
        output_tuples = limit_tuples;
        output_bytes = relation_byte_size(output_tuples, width);
    }
    else
    {
        output_tuples = tuples;
        output_bytes = input_bytes;
    }

    if (output_bytes > work_mem_bytes)
    {
        /*
         * We'll have to use a disk-based sort of all the tuples
         */
        double      npages = ceil(input_bytes / BLCKSZ);
        double      nruns = (input_bytes / work_mem_bytes) * 0.5;
        double      mergeorder = tuplesort_merge_order(work_mem_bytes);
        double      log_runs;
        double      npageaccesses;

        /*
         * CPU costs
         *
         * Assume about two operator evals per tuple comparison and N log2 N
         * comparisons
         */
        startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

        /* Disk costs */

        /* Compute logM(r) as log(r) / log(M) */
        if (nruns > mergeorder)
            log_runs = ceil(log(nruns) / log(mergeorder));
        else
            log_runs = 1.0;
        npageaccesses = 2.0 * npages * log_runs;
        /* Assume 3/4ths of accesses are sequential, 1/4th are not */
        startup_cost += npageaccesses *
            (seq_page_cost * 0.75 + random_page_cost * 0.25);
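        /*
         * Illustrative example (not part of the original file): sorting 1 GB
         * of input with work_mem = 64MB forms initial runs of roughly 128MB,
         * i.e. about 8 runs (nruns = (1GB / 64MB) * 0.5).  As long as the
         * merge order exceeds that, log_runs stays at 1, so the disk traffic
         * is about 2 * npages page accesses, each charged at
         * 0.75 * seq_page_cost + 0.25 * random_page_cost.
         */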
    }
    else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
    {
        /*
         * We'll use a bounded heap-sort keeping just K tuples in memory, for
         * a total number of tuple comparisons of N log2 K; but the constant
         * factor is a bit higher than for quicksort.  Tweak it so that the
         * cost curve is continuous at the crossover point.
         */
        startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
    }
    else
    {
        /* We'll use plain quicksort on all the input tuples */
        startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
    }

    /*
     * Also charge a small amount (arbitrarily set equal to operator cost) per
     * extracted tuple.  Note it's correct to use tuples not output_tuples
     * here --- the upper LIMIT will pro-rate the run cost so we'd be double
     * counting the LIMIT otherwise.
     */
    run_cost += cpu_operator_cost * tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * sort_exceeds_work_mem
 *    Given a finished Sort plan node, detect whether it is expected to
 *    spill to disk (ie, will need more than work_mem workspace)
 *
 * This assumes there will be no available LIMIT.
 */
bool
sort_exceeds_work_mem(Sort *sort)
{
    double      input_bytes = relation_byte_size(sort->plan.plan_rows,
                                                 sort->plan.plan_width);
    long        work_mem_bytes = work_mem * 1024L;

    return (input_bytes > work_mem_bytes);
}
/*
 * cost_material
 *    Determines and returns the cost of materializing a relation, including
 *    the cost of reading the input data.
 *
 * If the total volume of data to materialize exceeds work_mem, we will need
 * to write it to disk, so the cost is much higher in that case.
 */
void
cost_material(Path *path,
              Cost input_cost, double tuples, int width)
{
    Cost        startup_cost = input_cost;
    Cost        run_cost = 0;
    double      nbytes = relation_byte_size(tuples, width);
    long        work_mem_bytes = work_mem * 1024L;

    /* disk costs */
    if (nbytes > work_mem_bytes)
    {
        double      npages = ceil(nbytes / BLCKSZ);

        /* We'll write during startup and read during retrieval */
        startup_cost += seq_page_cost * npages;
        run_cost += seq_page_cost * npages;
    }

    /*
     * Charge a very small amount per inserted tuple, to reflect bookkeeping
     * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
     * tie that would otherwise exist between nestloop with A outer,
     * materialized B inner and nestloop with B outer, materialized A inner.
     * The extra cost ensures we'll prefer materializing the smaller rel.
     */
    startup_cost += cpu_tuple_cost * 0.1 * tuples;

    /*
     * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
     * so that it doesn't appear worthwhile to materialize a bare seqscan.
     */
    run_cost += cpu_tuple_cost * tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
/*
 * cost_agg
 *    Determines and returns the cost of performing an Agg plan node,
 *    including the cost of its input.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
 */
void
cost_agg(Path *path, PlannerInfo *root,
         AggStrategy aggstrategy, int numAggs,
         int numGroupCols, double numGroups,
         Cost input_startup_cost, Cost input_total_cost,
         double input_tuples)
{
    Cost        startup_cost;
    Cost        total_cost;

    /*
     * We charge one cpu_operator_cost per aggregate function per input tuple,
     * and another one per output tuple (corresponding to transfn and finalfn
     * calls respectively).  If we are grouping, we charge an additional
     * cpu_operator_cost per grouping column per input tuple for grouping
     * comparisons.
     *
     * We will produce a single output tuple if not grouping, and a tuple per
     * group otherwise.  We charge cpu_tuple_cost for each output tuple.
     *
     * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
     * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
     * input path is already sorted appropriately, AGG_SORTED should be
     * preferred (since it has no risk of memory overflow).  This will happen
     * as long as the computed total costs are indeed exactly equal --- but if
     * there's roundoff error we might do the wrong thing.  So be sure that
     * the computations below form the same intermediate values in the same
     * order.
     *
     * Note: ideally we should use the pg_proc.procost costs of each
     * aggregate's component functions, but for now that seems like an
     * excessive amount of work.
     */
    if (aggstrategy == AGG_PLAIN)
    {
        startup_cost = input_total_cost;
        startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
        /* we aren't grouping */
        total_cost = startup_cost + cpu_tuple_cost;
    }
    else if (aggstrategy == AGG_SORTED)
    {
        /* Here we are able to deliver output on-the-fly */
        startup_cost = input_startup_cost;
        total_cost = input_total_cost;
        /* calcs phrased this way to match HASHED case, see note above */
        total_cost += cpu_operator_cost * input_tuples * numGroupCols;
        total_cost += cpu_operator_cost * input_tuples * numAggs;
        total_cost += cpu_operator_cost * numGroups * numAggs;
        total_cost += cpu_tuple_cost * numGroups;
    }
    else
    {
        /* must be AGG_HASHED */
        startup_cost = input_total_cost;
        startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
        startup_cost += cpu_operator_cost * input_tuples * numAggs;
        total_cost = startup_cost;
        total_cost += cpu_operator_cost * numGroups * numAggs;
        total_cost += cpu_tuple_cost * numGroups;
    }

    path->startup_cost = startup_cost;
    path->total_cost = total_cost;
}
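/*
 * Illustrative example (not part of the original file): for 10000 input
 * tuples, one aggregate, one grouping column and 100 groups, both AGG_SORTED
 * and AGG_HASHED add the same CPU charge of
 *      cpu_operator_cost * (10000 + 10000 + 100) + cpu_tuple_cost * 100
 * to the input cost; they differ only in how much of it lands in
 * startup_cost, which is what lets an already-sorted path win on a tie.
 */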
/*
 * cost_windowagg
 *    Determines and returns the cost of performing a WindowAgg plan node,
 *    including the cost of its input.
 *
 * Input is assumed already properly sorted.
 */
void
cost_windowagg(Path *path, PlannerInfo *root,
               int numWindowFuncs, int numPartCols, int numOrderCols,
               Cost input_startup_cost, Cost input_total_cost,
               double input_tuples)
{
    Cost        startup_cost;
    Cost        total_cost;

    startup_cost = input_startup_cost;
    total_cost = input_total_cost;

    /*
     * We charge one cpu_operator_cost per window function per tuple (often a
     * drastic underestimate, but without a way to gauge how many tuples the
     * window function will fetch, it's hard to do better).  We also charge
     * cpu_operator_cost per grouping column per tuple for grouping
     * comparisons, plus cpu_tuple_cost per tuple for general overhead.
     */
    total_cost += cpu_operator_cost * input_tuples * numWindowFuncs;
    total_cost += cpu_operator_cost * input_tuples * (numPartCols + numOrderCols);
    total_cost += cpu_tuple_cost * input_tuples;

    path->startup_cost = startup_cost;
    path->total_cost = total_cost;
}
/*
 * cost_group
 *    Determines and returns the cost of performing a Group plan node,
 *    including the cost of its input.
 *
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
 */
void
cost_group(Path *path, PlannerInfo *root,
           int numGroupCols, double numGroups,
           Cost input_startup_cost, Cost input_total_cost,
           double input_tuples)
{
    Cost        startup_cost;
    Cost        total_cost;

    startup_cost = input_startup_cost;
    total_cost = input_total_cost;

    /*
     * Charge one cpu_operator_cost per comparison per input tuple.  We assume
     * all columns get compared at most of the tuples.
     */
    total_cost += cpu_operator_cost * input_tuples * numGroupCols;

    path->startup_cost = startup_cost;
    path->total_cost = total_cost;
}
/*
 * If a nestloop's inner path is an indexscan, be sure to use its estimated
 * output row count, which may be lower than the restriction-clause-only row
 * count of its parent.  (We don't include this case in the PATH_ROWS macro
 * because it applies *only* to a nestloop's inner relation.)  We have to
 * be prepared to recurse through Append nodes in case of an appendrel.
 */
static double
nestloop_inner_path_rows(Path *path)
{
    double      result;

    if (IsA(path, IndexPath))
        result = ((IndexPath *) path)->rows;
    else if (IsA(path, BitmapHeapPath))
        result = ((BitmapHeapPath *) path)->rows;
    else if (IsA(path, AppendPath))
    {
        ListCell   *l;

        result = 0;
        foreach(l, ((AppendPath *) path)->subpaths)
        {
            result += nestloop_inner_path_rows((Path *) lfirst(l));
        }
    }
    else
        result = PATH_ROWS(path);

    return result;
}
/*
 * cost_nestloop
 *    Determines and returns the cost of joining two relations using the
 *    nested loop algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 */
void
cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
    Path       *outer_path = path->outerjoinpath;
    Path       *inner_path = path->innerjoinpath;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        inner_run_cost;
    Cost        cpu_per_tuple;
    QualCost    restrict_qual_cost;
    double      outer_path_rows = PATH_ROWS(outer_path);
    double      inner_path_rows = nestloop_inner_path_rows(inner_path);
    double      ntuples;
    Selectivity outer_match_frac;
    Selectivity match_count;
    bool        indexed_join_quals;

    if (!enable_nestloop)
        startup_cost += disable_cost;

    /* cost of source data */

    /*
     * NOTE: clearly, we must pay both outer and inner paths' startup_cost
     * before we can start returning tuples, so the join's startup cost is
     * their sum.  What's not so clear is whether the inner path's
     * startup_cost must be paid again on each rescan of the inner path.  This
     * is not true if the inner path is materialized or is a hashjoin, but
     * probably is true otherwise.
     */
    startup_cost += outer_path->startup_cost + inner_path->startup_cost;
    run_cost += outer_path->total_cost - outer_path->startup_cost;
    if (IsA(inner_path, MaterialPath) ||
        IsA(inner_path, HashPath))
    {
        /* charge only run cost for each iteration of inner path */
    }
    else
    {
        /*
         * charge startup cost for each iteration of inner path, except we
         * already charged the first startup_cost in our own startup
         */
        run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
    }
    inner_run_cost = inner_path->total_cost - inner_path->startup_cost;

    if (adjust_semi_join(root, path, sjinfo,
                         &outer_match_frac,
                         &match_count,
                         &indexed_join_quals))
    {
        double      outer_matched_rows;
        Selectivity inner_scan_frac;

        /*
         * SEMI or ANTI join: executor will stop after first match.
         *
         * For an outer-rel row that has at least one match, we can expect the
         * inner scan to stop after a fraction 1/(match_count+1) of the inner
         * rows, if the matches are evenly distributed.  Since they probably
         * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
         * that fraction.  (If we used a larger fuzz factor, we'd have to
         * clamp inner_scan_frac to at most 1.0; but since match_count is at
         * least 1, no such clamp is needed now.)
         */
        outer_matched_rows = rint(outer_path_rows * outer_match_frac);
        inner_scan_frac = 2.0 / (match_count + 1.0);
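        /*
         * Illustrative example (not part of the original file): with
         * match_count = 3, the executor is expected to stop, on average,
         * after scanning 2.0 / (3 + 1) = 50% of the inner rows for each
         * matched outer row.
         */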
        /* Add inner run cost for outer tuples having matches */
        run_cost += outer_matched_rows * inner_run_cost * inner_scan_frac;

        /* Compute number of tuples processed (not number emitted!) */
        ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;

        /*
         * For unmatched outer-rel rows, there are two cases.  If the inner
         * path is an indexscan using all the joinquals as indexquals, then an
         * unmatched row results in an indexscan returning no rows, which is
         * probably quite cheap.  We estimate this case as the same cost to
         * return the first tuple of a nonempty scan.  Otherwise, the executor
         * will have to scan the whole inner rel; not so cheap.
         */
        if (indexed_join_quals)
        {
            run_cost += (outer_path_rows - outer_matched_rows) *
                inner_run_cost / inner_path_rows;
            /* We won't be evaluating any quals at all for these rows */
        }
        else
        {
            run_cost += (outer_path_rows - outer_matched_rows) *
                inner_run_cost;
            ntuples += (outer_path_rows - outer_matched_rows) *
                inner_path_rows;
        }
    }
    else
    {
        /* Normal case; we'll scan whole input rel for each outer row */
        run_cost += outer_path_rows * inner_run_cost;

        /* Compute number of tuples processed (not number emitted!) */
        ntuples = outer_path_rows * inner_path_rows;
    }

    /* CPU costs */
    cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
    startup_cost += restrict_qual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * ntuples;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = startup_cost + run_cost;
}
/*
 * cost_mergejoin
 *    Determines and returns the cost of joining two relations using the
 *    merge join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 *
 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
 * outersortkeys and innersortkeys are lists of the keys to be used
 * to sort the outer and inner relations, or NIL if no explicit
 * sort is needed because the source path is already ordered.
 */
void
cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
    Path       *outer_path = path->jpath.outerjoinpath;
    Path       *inner_path = path->jpath.innerjoinpath;
    List       *mergeclauses = path->path_mergeclauses;
    List       *outersortkeys = path->outersortkeys;
    List       *innersortkeys = path->innersortkeys;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;
    QualCost    merge_qual_cost;
    QualCost    qp_qual_cost;
    double      outer_path_rows = PATH_ROWS(outer_path);
    double      inner_path_rows = PATH_ROWS(inner_path);
    double      outer_rows,
                inner_rows,
                outer_skip_rows,
                inner_skip_rows;
    double      mergejointuples,
                rescannedtuples;
    double      rescanratio;
    Selectivity outerstartsel,
                outerendsel,
                innerstartsel,
                innerendsel;
    Path        sort_path;      /* dummy for result of cost_sort */

    /* Protect some assumptions below that rowcounts aren't zero */
    if (outer_path_rows <= 0)
        outer_path_rows = 1;
    if (inner_path_rows <= 0)
        inner_path_rows = 1;

    if (!enable_mergejoin)
        startup_cost += disable_cost;

    /*
     * Compute cost of the mergequals and qpquals (other restriction clauses)
     * separately.
     */
    cost_qual_eval(&merge_qual_cost, mergeclauses, root);
    cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
    qp_qual_cost.startup -= merge_qual_cost.startup;
    qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;

    /*
     * Get approx # tuples passing the mergequals.  We use approx_tuple_count
     * here because we need an estimate done with JOIN_INNER semantics.
     */
    mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);

    /*
     * When there are equal merge keys in the outer relation, the mergejoin
     * must rescan any matching tuples in the inner relation.  This means
     * re-fetching inner tuples.  Our cost model for this is that a re-fetch
     * costs the same as an original fetch, which is probably an overestimate;
     * but on the other hand we ignore the bookkeeping costs of mark/restore.
     * Not clear if it's worth developing a more refined model.
     *
     * For regular inner and outer joins, the number of re-fetches can be
     * estimated approximately as size of merge join output minus size of
     * inner relation.  Assume that the distinct key values are 1, 2, ..., and
     * denote the number of values of each key in the outer relation as m1,
     * m2, ...; in the inner relation, n1, n2, ...  Then we have
     *
     * size of join = m1 * n1 + m2 * n2 + ...
     *
     * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
     * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
     * relation
     *
     * This equation works correctly for outer tuples having no inner match
     * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
     * are effectively subtracting those from the number of rescanned tuples,
     * when we should not.  Can we do better without expensive selectivity
     * computations?
     *
     * The whole issue is moot if we are working from a unique-ified outer
     * input.
     */
    if (IsA(outer_path, UniquePath))
        rescannedtuples = 0;
    else
    {
        rescannedtuples = mergejointuples - inner_path_rows;
        /* Must clamp because of possible underestimate */
        if (rescannedtuples < 0)
            rescannedtuples = 0;
    }

    /* We'll inflate inner run cost this much to account for rescanning */
    rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
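    /*
     * Illustrative example (not part of the original file): if the join is
     * expected to emit 15000 rows from an inner input of 10000 rows, about
     * 5000 inner tuples are re-fetched, so the inner run cost is inflated by
     * rescanratio = 1.0 + 5000/10000 = 1.5.
     */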
    /*
     * A merge join will stop as soon as it exhausts either input stream
     * (unless it's an outer join, in which case the outer side has to be
     * scanned all the way anyway).  Estimate fraction of the left and right
     * inputs that will actually need to be scanned.  Likewise, we can
     * estimate the number of rows that will be skipped before the first join
     * pair is found, which should be factored into startup cost.  We use only
     * the first (most significant) merge clause for this purpose.  Since
     * mergejoinscansel() is a fairly expensive computation, we cache the
     * results in the merge clause RestrictInfo.
     */
    if (mergeclauses && path->jpath.jointype != JOIN_FULL)
    {
        RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
        List       *opathkeys;
        List       *ipathkeys;
        PathKey    *opathkey;
        PathKey    *ipathkey;
        MergeScanSelCache *cache;

        /* Get the input pathkeys to determine the sort-order details */
        opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
        ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
        opathkey = (PathKey *) linitial(opathkeys);
        ipathkey = (PathKey *) linitial(ipathkeys);
        /* debugging check */
        if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
            opathkey->pk_strategy != ipathkey->pk_strategy ||
            opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
            elog(ERROR, "left and right pathkeys do not match in mergejoin");

        /* Get the selectivity with caching */
        cache = cached_scansel(root, firstclause, opathkey);

        if (bms_is_subset(firstclause->left_relids,
                          outer_path->parent->relids))
        {
            /* left side of clause is outer */
            outerstartsel = cache->leftstartsel;
            outerendsel = cache->leftendsel;
            innerstartsel = cache->rightstartsel;
            innerendsel = cache->rightendsel;
        }
        else
        {
            /* left side of clause is inner */
            outerstartsel = cache->rightstartsel;
            outerendsel = cache->rightendsel;
            innerstartsel = cache->leftstartsel;
            innerendsel = cache->leftendsel;
        }
        if (path->jpath.jointype == JOIN_LEFT ||
            path->jpath.jointype == JOIN_ANTI)
        {
            outerstartsel = 0.0;
            outerendsel = 1.0;
        }
        else if (path->jpath.jointype == JOIN_RIGHT)
        {
            innerstartsel = 0.0;
            innerendsel = 1.0;
        }
    }
    else
    {
        /* cope with clauseless or full mergejoin */
        outerstartsel = innerstartsel = 0.0;
        outerendsel = innerendsel = 1.0;
    }

    /*
     * Convert selectivities to row counts.  We force outer_rows and
     * inner_rows to be at least 1, but the skip_rows estimates can be zero.
     */
    outer_skip_rows = rint(outer_path_rows * outerstartsel);
    inner_skip_rows = rint(inner_path_rows * innerstartsel);
    outer_rows = clamp_row_est(outer_path_rows * outerendsel);
    inner_rows = clamp_row_est(inner_path_rows * innerendsel);

    Assert(outer_skip_rows <= outer_rows);
    Assert(inner_skip_rows <= inner_rows);

    /*
     * Readjust scan selectivities to account for above rounding.  This is
     * normally an insignificant effect, but when there are only a few rows in
     * the inputs, failing to do this makes for a large percentage error.
     */
    outerstartsel = outer_skip_rows / outer_path_rows;
    innerstartsel = inner_skip_rows / inner_path_rows;
    outerendsel = outer_rows / outer_path_rows;
    innerendsel = inner_rows / inner_path_rows;

    Assert(outerstartsel <= outerendsel);
    Assert(innerstartsel <= innerendsel);

    /* cost of source data */

    if (outersortkeys)          /* do we need to sort outer? */
    {
        cost_sort(&sort_path,
                  root,
                  outersortkeys,
                  outer_path->total_cost,
                  outer_path_rows,
                  outer_path->parent->width,
                  -1.0);
        startup_cost += sort_path.startup_cost;
        startup_cost += (sort_path.total_cost - sort_path.startup_cost)
            * outerstartsel;
        run_cost += (sort_path.total_cost - sort_path.startup_cost)
            * (outerendsel - outerstartsel);
    }
    else
    {
        startup_cost += outer_path->startup_cost;
        startup_cost += (outer_path->total_cost - outer_path->startup_cost)
            * outerstartsel;
        run_cost += (outer_path->total_cost - outer_path->startup_cost)
            * (outerendsel - outerstartsel);
    }

    if (innersortkeys)          /* do we need to sort inner? */
    {
        cost_sort(&sort_path,
                  root,
                  innersortkeys,
                  inner_path->total_cost,
                  inner_path_rows,
                  inner_path->parent->width,
                  -1.0);
        startup_cost += sort_path.startup_cost;
        startup_cost += (sort_path.total_cost - sort_path.startup_cost)
            * innerstartsel * rescanratio;
        run_cost += (sort_path.total_cost - sort_path.startup_cost)
            * (innerendsel - innerstartsel) * rescanratio;

        /*
         * If the inner sort is expected to spill to disk, we want to add a
         * materialize node to shield it from the need to handle mark/restore.
         * This will allow it to perform the last merge pass on-the-fly, while
         * in most cases not requiring the materialize to spill to disk.
         * Charge an extra cpu_tuple_cost per tuple to account for the
         * materialize node.  (Keep this estimate in sync with similar ones in
         * create_mergejoin_path and create_mergejoin_plan.)
         */
        if (relation_byte_size(inner_path_rows, inner_path->parent->width) >
            (work_mem * 1024L))
            run_cost += cpu_tuple_cost * inner_path_rows;
    }
    else
    {
        startup_cost += inner_path->startup_cost;
        startup_cost += (inner_path->total_cost - inner_path->startup_cost)
            * innerstartsel * rescanratio;
        run_cost += (inner_path->total_cost - inner_path->startup_cost)
            * (innerendsel - innerstartsel) * rescanratio;
    }

    /* CPU costs */

    /*
     * The number of tuple comparisons needed is approximately number of outer
     * rows plus number of inner rows plus number of rescanned tuples (can we
     * refine this?).  At each one, we need to evaluate the mergejoin quals.
     */
    startup_cost += merge_qual_cost.startup;
    startup_cost += merge_qual_cost.per_tuple *
        (outer_skip_rows + inner_skip_rows * rescanratio);
    run_cost += merge_qual_cost.per_tuple *
        ((outer_rows - outer_skip_rows) +
         (inner_rows - inner_skip_rows) * rescanratio);

    /*
     * For each tuple that gets through the mergejoin proper, we charge
     * cpu_tuple_cost plus the cost of evaluating additional restriction
     * clauses that are to be applied at the join.  (This is pessimistic since
     * not all of the quals may get evaluated at each tuple.)
     *
     * Note: we could adjust for SEMI/ANTI joins skipping some qual
     * evaluations here, but it's probably not worth the trouble.
     */
    startup_cost += qp_qual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * mergejointuples;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
}
/*
 * run mergejoinscansel() with caching
 */
static MergeScanSelCache *
cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
{
    MergeScanSelCache *cache;
    ListCell   *lc;
    Selectivity leftstartsel,
                leftendsel,
                rightstartsel,
                rightendsel;
    MemoryContext oldcontext;

    /* Do we have this result already? */
    foreach(lc, rinfo->scansel_cache)
    {
        cache = (MergeScanSelCache *) lfirst(lc);
        if (cache->opfamily == pathkey->pk_opfamily &&
            cache->strategy == pathkey->pk_strategy &&
            cache->nulls_first == pathkey->pk_nulls_first)
            return cache;
    }

    /* Nope, do the computation */
    mergejoinscansel(root,
                     (Node *) rinfo->clause,
                     pathkey->pk_opfamily,
                     pathkey->pk_strategy,
                     pathkey->pk_nulls_first,
                     &leftstartsel,
                     &leftendsel,
                     &rightstartsel,
                     &rightendsel);

    /* Cache the result in suitably long-lived workspace */
    oldcontext = MemoryContextSwitchTo(root->planner_cxt);

    cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
    cache->opfamily = pathkey->pk_opfamily;
    cache->strategy = pathkey->pk_strategy;
    cache->nulls_first = pathkey->pk_nulls_first;
    cache->leftstartsel = leftstartsel;
    cache->leftendsel = leftendsel;
    cache->rightstartsel = rightstartsel;
    cache->rightendsel = rightendsel;

    rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);

    MemoryContextSwitchTo(oldcontext);

    return cache;
}
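/*
 * The cached selectivities are consumed by the mergejoin costing code, which
 * picks either the left{start,end}sel or the right{start,end}sel pair
 * depending on which side of the mergeclause belongs to the outer relation.
 */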
/*
 * cost_hashjoin
 *    Determines and returns the cost of joining two relations using the
 *    hash join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 *
 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
 */
void
cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
    Path       *outer_path = path->jpath.outerjoinpath;
    Path       *inner_path = path->jpath.innerjoinpath;
    List       *hashclauses = path->path_hashclauses;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_per_tuple;
    QualCost    hash_qual_cost;
    QualCost    qp_qual_cost;
    double      hashjointuples;
    double      outer_path_rows = PATH_ROWS(outer_path);
    double      inner_path_rows = PATH_ROWS(inner_path);
    int         num_hashclauses = list_length(hashclauses);
    int         numbuckets;
    int         numbatches;
    int         num_skew_mcvs;
    double      virtualbuckets;
    Selectivity innerbucketsize;
    Selectivity outer_match_frac;
    Selectivity match_count;
    ListCell   *hcl;

    if (!enable_hashjoin)
        startup_cost += disable_cost;

    /*
     * Compute cost of the hashquals and qpquals (other restriction clauses)
     * separately.
     */
    cost_qual_eval(&hash_qual_cost, hashclauses, root);
    cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
    qp_qual_cost.startup -= hash_qual_cost.startup;
    qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
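    /*
     * The subtraction keeps the hash quals from being double-charged: if the
     * whole joinrestrictinfo list costs, say, 0.015 per tuple and the
     * hashclauses account for 0.005 of that, qp_qual_cost.per_tuple is left
     * at 0.010, while the hash quals are charged separately against bucket
     * probes below.
     */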
    /* cost of source data */
    startup_cost += outer_path->startup_cost;
    run_cost += outer_path->total_cost - outer_path->startup_cost;
    startup_cost += inner_path->total_cost;

    /*
     * Cost of computing hash function: must do it once per input tuple. We
     * charge one cpu_operator_cost for each column's hash function.  Also,
     * tack on one cpu_tuple_cost per inner row, to model the costs of
     * inserting the row into the hashtable.
     *
     * XXX when a hashclause is more complex than a single operator, we really
     * should charge the extra eval costs of the left or right side, as
     * appropriate, here.  This seems more work than it's worth at the moment.
     */
    startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
        * inner_path_rows;
    run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
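    /*
     * With one hashclause and the default cpu_operator_cost (0.0025) and
     * cpu_tuple_cost (0.01), hashing a million inner rows adds
     * (0.0025 + 0.01) * 1e6 = 12500 to startup, and a million outer rows add
     * 0.0025 * 1e6 = 2500 to run cost.
     */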
    /*
     * Get hash table size that executor would use for inner relation.
     *
     * XXX for the moment, always assume that skew optimization will be
     * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
     * trying to determine that for sure.
     *
     * XXX at some point it might be interesting to try to account for skew
     * optimization in the cost estimate, but for now, we don't.
     */
    ExecChooseHashTableSize(inner_path_rows,
                            inner_path->parent->width,
                            true,       /* useskew */
                            &numbuckets,
                            &numbatches,
                            &num_skew_mcvs);
    virtualbuckets = (double) numbuckets * (double) numbatches;

    /* mark the path with estimated # of batches */
    path->num_batches = numbatches;
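    /*
     * For example, if the executor would pick 1024 buckets and 4 batches for
     * this inner relation, virtualbuckets is 4096, i.e. the effective number
     * of buckets spread across all batches.
     */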
    /*
     * Determine bucketsize fraction for inner relation.  We use the smallest
     * bucketsize estimated for any individual hashclause; this is undoubtedly
     * conservative.
     *
     * BUT: if inner relation has been unique-ified, we can assume it's good
     * for hashing.  This is important both because it's the right answer, and
     * because we avoid contaminating the cache with a value that's wrong for
     * non-unique-ified paths.
     */
    if (IsA(inner_path, UniquePath))
        innerbucketsize = 1.0 / virtualbuckets;
    else
    {
        innerbucketsize = 1.0;
        foreach(hcl, hashclauses)
        {
            RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
            Selectivity thisbucketsize;

            Assert(IsA(restrictinfo, RestrictInfo));

            /*
             * First we have to figure out which side of the hashjoin clause
             * is the inner side.
             *
             * Since we tend to visit the same clauses over and over when
             * planning a large query, we cache the bucketsize estimate in the
             * RestrictInfo node to avoid repeated lookups of statistics.
             */
            if (bms_is_subset(restrictinfo->right_relids,
                              inner_path->parent->relids))
            {
                /* righthand side is inner */
                thisbucketsize = restrictinfo->right_bucketsize;
                if (thisbucketsize < 0)
                {
                    /* not cached yet */
                    thisbucketsize =
                        estimate_hash_bucketsize(root,
                                                 get_rightop(restrictinfo->clause),
                                                 virtualbuckets);
                    restrictinfo->right_bucketsize = thisbucketsize;
                }
            }
            else
            {
                Assert(bms_is_subset(restrictinfo->left_relids,
                                     inner_path->parent->relids));
                /* lefthand side is inner */
                thisbucketsize = restrictinfo->left_bucketsize;
                if (thisbucketsize < 0)
                {
                    /* not cached yet */
                    thisbucketsize =
                        estimate_hash_bucketsize(root,
                                                 get_leftop(restrictinfo->clause),
                                                 virtualbuckets);
                    restrictinfo->left_bucketsize = thisbucketsize;
                }
            }

            if (innerbucketsize > thisbucketsize)
                innerbucketsize = thisbucketsize;
        }
    }
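    /*
     * innerbucketsize is a fraction of the inner relation: a value of 0.001
     * means the most selective hashclause is expected to leave about 0.1% of
     * the inner rows in whichever bucket an outer tuple probes.
     */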
    /*
     * If inner relation is too big then we will need to "batch" the join,
     * which implies writing and reading most of the tuples to disk an extra
     * time.  Charge seq_page_cost per page, since the I/O should be nice and
     * sequential.  Writing the inner rel counts as startup cost, all the rest
     * as run cost.
     */
    if (numbatches > 1)
    {
        double      outerpages = page_size(outer_path_rows,
                                           outer_path->parent->width);
        double      innerpages = page_size(inner_path_rows,
                                           inner_path->parent->width);

        startup_cost += seq_page_cost * innerpages;
        run_cost += seq_page_cost * (innerpages + 2 * outerpages);
    }
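    /*
     * The page counts follow from the batching mechanics: the inner rel is
     * written out once while the hash table is built (startup), then read
     * back batch by batch, while the outer rel is both written and re-read,
     * hence innerpages + 2 * outerpages at run time.
     */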
    /* CPU costs */

    if (adjust_semi_join(root, &path->jpath, sjinfo,
                         &outer_match_frac,
                         &match_count,
                         NULL))
    {
        double      outer_matched_rows;
        Selectivity inner_scan_frac;

        /*
         * SEMI or ANTI join: executor will stop after first match.
         *
         * For an outer-rel row that has at least one match, we can expect the
         * bucket scan to stop after a fraction 1/(match_count+1) of the
         * bucket's rows, if the matches are evenly distributed.  Since they
         * probably aren't quite evenly distributed, we apply a fuzz factor of
         * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
         * to clamp inner_scan_frac to at most 1.0; but since match_count is
         * at least 1, no such clamp is needed now.)
         */
        outer_matched_rows = rint(outer_path_rows * outer_match_frac);
        inner_scan_frac = 2.0 / (match_count + 1.0);

        startup_cost += hash_qual_cost.startup;
        run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
            clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
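        /*
         * For example, a match_count of 3 gives inner_scan_frac = 2/(3+1) =
         * 0.5: a matched outer row is expected to scan about half of its
         * bucket before the first match stops the scan.
         */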
        /*
         * For unmatched outer-rel rows, the picture is quite a lot different.
         * In the first place, there is no reason to assume that these rows
         * preferentially hit heavily-populated buckets; instead assume they
         * are uncorrelated with the inner distribution and so they see an
         * average bucket size of inner_path_rows / virtualbuckets.  In the
         * second place, it seems likely that they will have few if any exact
         * hash-code matches and so very few of the tuples in the bucket will
         * actually require eval of the hash quals.  We don't have any good
         * way to estimate how many will, but for the moment assume that the
         * effective cost per bucket entry is one-tenth what it is for
         * matchable tuples.
         */
        run_cost += hash_qual_cost.per_tuple *
            (outer_path_rows - outer_matched_rows) *
            clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;

        /* Get # of tuples that will pass the basic join */
        if (path->jpath.jointype == JOIN_SEMI)
            hashjointuples = outer_matched_rows;
        else
            hashjointuples = outer_path_rows - outer_matched_rows;
    }
    else
    {
        /*
         * The number of tuple comparisons needed is the number of outer
         * tuples times the typical number of tuples in a hash bucket, which
         * is the inner relation size times its bucketsize fraction.  At each
         * one, we need to evaluate the hashjoin quals.  But actually,
         * charging the full qual eval cost at each tuple is pessimistic,
         * since we don't evaluate the quals unless the hash values match
         * exactly.  For lack of a better idea, halve the cost estimate to
         * allow for that.
         */
        startup_cost += hash_qual_cost.startup;
        run_cost += hash_qual_cost.per_tuple * outer_path_rows *
            clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
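        /*
         * For instance, 1000 outer rows probing a million-row inner relation
         * with innerbucketsize = 1e-4 implies about 100 bucket entries per
         * probe, so 1000 * 100 * 0.5 = 50000 hash-qual evaluations get
         * charged here.
         */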
        /*
         * Get approx # tuples passing the hashquals.  We use
         * approx_tuple_count here because we need an estimate done with
         * JOIN_INNER semantics.
         */
        hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
    }
    /*
     * For each tuple that gets through the hashjoin proper, we charge
     * cpu_tuple_cost plus the cost of evaluating additional restriction
     * clauses that are to be applied at the join.  (This is pessimistic since
     * not all of the quals may get evaluated at each tuple.)
     */
    startup_cost += qp_qual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * hashjointuples;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
}
/*
 * cost_subplan
 *      Figure the costs for a SubPlan (or initplan).
 *
 * Note: we could dig the subplan's Plan out of the root list, but in practice
 * all callers have it handy already, so we make them pass it.
 */
void
cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
    QualCost    sp_cost;

    /* Figure any cost for evaluating the testexpr */
    cost_qual_eval(&sp_cost,
                   make_ands_implicit((Expr *) subplan->testexpr),
                   root);

    if (subplan->useHashTable)
    {
        /*
         * If we are using a hash table for the subquery outputs, then the
         * cost of evaluating the query is a one-time cost.  We charge one
         * cpu_operator_cost per tuple for the work of loading the hashtable,
         * too.
         */
        sp_cost.startup += plan->total_cost +
            cpu_operator_cost * plan->plan_rows;

        /*
         * The per-tuple costs include the cost of evaluating the lefthand
         * expressions, plus the cost of probing the hashtable.  We already
         * accounted for the lefthand expressions as part of the testexpr, and
         * will also have counted one cpu_operator_cost for each comparison
         * operator.  That is probably too low for the probing cost, but it's
         * hard to make a better estimate, so live with it for now.
         */
    }
    else
    {
        /*
         * Otherwise we will be rescanning the subplan output on each
         * evaluation.  We need to estimate how much of the output we will
         * actually need to scan.  NOTE: this logic should agree with the
         * tuple_fraction estimates used by make_subplan() in
         * plan/subselect.c.
         */
        Cost        plan_run_cost = plan->total_cost - plan->startup_cost;

        if (subplan->subLinkType == EXISTS_SUBLINK)
        {
            /* we only need to fetch 1 tuple */
            sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
        }
        else if (subplan->subLinkType == ALL_SUBLINK ||
                 subplan->subLinkType == ANY_SUBLINK)
        {
            /* assume we need 50% of the tuples */
            sp_cost.per_tuple += 0.50 * plan_run_cost;
            /* also charge a cpu_operator_cost per row examined */
            sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
        }
        else
        {
            /* assume we need all tuples */
            sp_cost.per_tuple += plan_run_cost;
        }

        /*
         * Also account for subplan's startup cost.  If the subplan is
         * uncorrelated or undirect correlated, AND its topmost node is a Sort
         * or Material node, assume that we'll only need to pay its startup
         * cost once; otherwise assume we pay the startup cost every time.
         */
        if (subplan->parParam == NIL &&
            (IsA(plan, Sort) ||
             IsA(plan, Material)))
            sp_cost.startup += plan->startup_cost;
        else
            sp_cost.per_tuple += plan->startup_cost;
    }

    subplan->startup_cost = sp_cost.startup;
    subplan->per_call_cost = sp_cost.per_tuple;
}
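/*
 * Example: for an EXISTS sublink over a 10000-row subplan whose run cost
 * (total minus startup) is 100, each evaluation is charged only
 * 100 / 10000 = 0.01, since the scan stops at the first row; an ANY sublink
 * over the same subplan would instead be charged 0.50 * 100 plus
 * 0.50 * 10000 * cpu_operator_cost per evaluation.
 */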
/*
 * cost_qual_eval
 *      Estimate the CPU costs of evaluating a WHERE clause.
 *      The input can be either an implicitly-ANDed list of boolean
 *      expressions, or a list of RestrictInfo nodes.  (The latter is
 *      preferred since it allows caching of the results.)
 *      The result includes both a one-time (startup) component,
 *      and a per-evaluation component.
 */
void
cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
{
    cost_qual_eval_context context;
    ListCell   *l;

    context.root = root;
    context.total.startup = 0;
    context.total.per_tuple = 0;

    /* We don't charge any cost for the implicit ANDing at top level ... */

    foreach(l, quals)
    {
        Node       *qual = (Node *) lfirst(l);

        cost_qual_eval_walker(qual, &context);
    }

    *cost = context.total;
}
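/*
 * A typical caller, such as the scan-costing routines elsewhere in this file,
 * does something like the sketch below: the startup component is paid once,
 * the per-tuple component once per row the quals are applied to.
 *
 *      QualCost    qpqual_cost;
 *
 *      cost_qual_eval(&qpqual_cost, rel->baserestrictinfo, root);
 *      startup_cost += qpqual_cost.startup;
 *      run_cost += (cpu_tuple_cost + qpqual_cost.per_tuple) * tuples;
 */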
/*
 * cost_qual_eval_node
 *      As above, for a single RestrictInfo or expression.
 */
void
cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
{
    cost_qual_eval_context context;

    context.root = root;
    context.total.startup = 0;
    context.total.per_tuple = 0;

    cost_qual_eval_walker(qual, &context);

    *cost = context.total;
}
static bool
cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
{
    if (node == NULL)
        return false;

    /*
     * RestrictInfo nodes contain an eval_cost field reserved for this
     * routine's use, so that it's not necessary to evaluate the qual clause's
     * cost more than once.  If the clause's cost hasn't been computed yet,
     * the field's startup value will contain -1.
     */
    if (IsA(node, RestrictInfo))
    {
        RestrictInfo *rinfo = (RestrictInfo *) node;

        if (rinfo->eval_cost.startup < 0)
        {
            cost_qual_eval_context locContext;

            locContext.root = context->root;
            locContext.total.startup = 0;
            locContext.total.per_tuple = 0;

            /*
             * For an OR clause, recurse into the marked-up tree so that we
             * set the eval_cost for contained RestrictInfos too.
             */
            if (rinfo->orclause)
                cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
            else
                cost_qual_eval_walker((Node *) rinfo->clause, &locContext);

            /*
             * If the RestrictInfo is marked pseudoconstant, it will be tested
             * only once, so treat its cost as all startup cost.
             */
            if (rinfo->pseudoconstant)
            {
                /* count one execution during startup */
                locContext.total.startup += locContext.total.per_tuple;
                locContext.total.per_tuple = 0;
            }

            rinfo->eval_cost = locContext.total;
        }
        context->total.startup += rinfo->eval_cost.startup;
        context->total.per_tuple += rinfo->eval_cost.per_tuple;
        /* do NOT recurse into children */
        return false;
    }
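    /*
     * A pseudoconstant qual is one containing no Vars of the current query
     * level and no volatile functions, e.g. a clause comparing two
     * outer-level Params; its evaluation cost is charged once at startup
     * rather than once per row.
     */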
    /*
     * For each operator or function node in the given tree, we charge the
     * estimated execution cost given by pg_proc.procost (remember to multiply
     * this by cpu_operator_cost).
     *
     * Vars and Consts are charged zero, and so are boolean operators (AND,
     * OR, NOT).  Simplistic, but a lot better than no model at all.
     *
     * Note that Aggref and WindowFunc nodes are (and should be) treated like
     * Vars --- whatever execution cost they have is absorbed into
     * plan-node-specific costing.  As far as expression evaluation is
     * concerned they're just like Vars.
     *
     * Should we try to account for the possibility of short-circuit
     * evaluation of AND/OR?  Probably *not*, because that would make the
     * results depend on the clause ordering, and we are not in any position
     * to expect that the current ordering of the clauses is the one that's
     * going to end up being used.  (Is it worth applying order_qual_clauses
     * much earlier in the planning process to fix this?)
     */
    if (IsA(node, FuncExpr))
    {
        context->total.per_tuple +=
            get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
    }
    else if (IsA(node, OpExpr) ||
             IsA(node, DistinctExpr) ||
             IsA(node, NullIfExpr))
    {
        /* rely on struct equivalence to treat these all alike */
        set_opfuncid((OpExpr *) node);
        context->total.per_tuple +=
            get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
    }
    else if (IsA(node, ScalarArrayOpExpr))
    {
        /*
         * Estimate that the operator will be applied to about half of the
         * array elements before the answer is determined.
         */
        ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
        Node       *arraynode = (Node *) lsecond(saop->args);

        set_sa_opfuncid(saop);
        context->total.per_tuple += get_func_cost(saop->opfuncid) *
            cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
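        /*
         * For example, "x = ANY(arr)" with a ten-element constant array is
         * charged as five operator applications (10 * 0.5) times the
         * operator's per-call cost.
         */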
    }
    else if (IsA(node, CoerceViaIO))
    {
        CoerceViaIO *iocoerce = (CoerceViaIO *) node;
        Oid         iofunc;
        Oid         typioparam;
        bool        typisvarlena;

        /* check the result type's input function */
        getTypeInputInfo(iocoerce->resulttype,
                         &iofunc, &typioparam);
        context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
        /* check the input type's output function */
        getTypeOutputInfo(exprType((Node *) iocoerce->arg),
                          &iofunc, &typisvarlena);
        context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
    }
    else if (IsA(node, ArrayCoerceExpr))
    {
        ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
        Node       *arraynode = (Node *) acoerce->arg;

        if (OidIsValid(acoerce->elemfuncid))
            context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
                cpu_operator_cost * estimate_array_length(arraynode);
    }
    else if (IsA(node, RowCompareExpr))
    {
        /* Conservatively assume we will check all the columns */
        RowCompareExpr *rcexpr = (RowCompareExpr *) node;
        ListCell   *lc;

        foreach(lc, rcexpr->opnos)
        {
            Oid         opid = lfirst_oid(lc);

            context->total.per_tuple += get_func_cost(get_opcode(opid)) *
                cpu_operator_cost;
        }
    }
    else if (IsA(node, CurrentOfExpr))
    {
        /* Report high cost to prevent selection of anything but TID scan */
        context->total.startup += disable_cost;
    }
    else if (IsA(node, SubLink))
    {
        /* This routine should not be applied to un-planned expressions */
        elog(ERROR, "cannot handle unplanned sub-select");
    }
    else if (IsA(node, SubPlan))
    {
        /*
         * A subplan node in an expression typically indicates that the
         * subplan will be executed on each evaluation, so charge accordingly.
         * (Sub-selects that can be executed as InitPlans have already been
         * removed from the expression.)
         */
        SubPlan    *subplan = (SubPlan *) node;

        context->total.startup += subplan->startup_cost;
        context->total.per_tuple += subplan->per_call_cost;

        /*
         * We don't want to recurse into the testexpr, because it was already
         * counted in the SubPlan node's costs.  So we're done.
         */
        return false;
    }
    else if (IsA(node, AlternativeSubPlan))
    {
        /*
         * Arbitrarily use the first alternative plan for costing.  (We should
         * certainly only include one alternative, and we don't yet have
         * enough information to know which one the executor is most likely to
         * use.)
         */
        AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;

        return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
                                     context);
    }

    /* recurse into children */
    return expression_tree_walker(node, cost_qual_eval_walker,
                                  (void *) context);
}
/*
 * adjust_semi_join
 *    Estimate how much of the inner input a SEMI or ANTI join
 *    can be expected to scan.
 *
 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
 * inner rows as soon as it finds a match to the current outer row.
 * We should therefore adjust some of the cost components for this effect.
 * This function computes some estimates needed for these adjustments.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 *
 * Returns TRUE if this is a SEMI or ANTI join, FALSE if not.
 *
 * Output parameters (set only in TRUE-result case):
 * *outer_match_frac is set to the fraction of the outer tuples that are
 *      expected to have at least one match.
 * *match_count is set to the average number of matches expected for
 *      outer tuples that have at least one match.
 * *indexed_join_quals is set to TRUE if all the joinquals are used as
 *      inner index quals, FALSE if not.
 *
 * indexed_join_quals can be passed as NULL if that information is not
 * relevant (it is only useful for the nestloop case).
 */
static bool
adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
                 Selectivity *outer_match_frac,
                 Selectivity *match_count,
                 bool *indexed_join_quals)
{
    JoinType    jointype = path->jointype;
    Selectivity jselec;
    Selectivity nselec;
    Selectivity avgmatch;
    SpecialJoinInfo norm_sjinfo;
    List       *joinquals;
    ListCell   *l;

    /* Fall out if it's not JOIN_SEMI or JOIN_ANTI */
    if (jointype != JOIN_SEMI && jointype != JOIN_ANTI)
        return false;

    /*
     * Note: it's annoying to repeat this selectivity estimation on each call,
     * when the joinclause list will be the same for all path pairs
     * implementing a given join.  clausesel.c will save us from the worst
     * effects of this by caching at the RestrictInfo level; but perhaps it'd
     * be worth finding a way to cache the results at a higher level.
     */

    /*
     * In an ANTI join, we must ignore clauses that are "pushed down", since
     * those won't affect the match logic.  In a SEMI join, we do not
     * distinguish joinquals from "pushed down" quals, so just use the whole
     * restrictinfo list.
     */
    if (jointype == JOIN_ANTI)
    {
        joinquals = NIL;
        foreach(l, path->joinrestrictinfo)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

            Assert(IsA(rinfo, RestrictInfo));
            if (!rinfo->is_pushed_down)
                joinquals = lappend(joinquals, rinfo);
        }
    }
    else
        joinquals = path->joinrestrictinfo;
    /*
     * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
     */
    jselec = clauselist_selectivity(root,
                                    joinquals,
                                    0,
                                    jointype,
                                    sjinfo);

    /*
     * Also get the normal inner-join selectivity of the join clauses.
     */
    norm_sjinfo.type = T_SpecialJoinInfo;
    norm_sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
    norm_sjinfo.min_righthand = path->innerjoinpath->parent->relids;
    norm_sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
    norm_sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
    norm_sjinfo.jointype = JOIN_INNER;
    /* we don't bother trying to make the remaining fields valid */
    norm_sjinfo.lhs_strict = false;
    norm_sjinfo.delay_upper_joins = false;
    norm_sjinfo.join_quals = NIL;

    nselec = clauselist_selectivity(root,
                                    joinquals,
                                    0,
                                    JOIN_INNER,
                                    &norm_sjinfo);

    /* Avoid leaking a lot of ListCells */
    if (jointype == JOIN_ANTI)
        list_free(joinquals);
    /*
     * jselec can be interpreted as the fraction of outer-rel rows that have
     * any matches (this is true for both SEMI and ANTI cases).  And nselec is
     * the fraction of the Cartesian product that matches.  So, the average
     * number of matches for each outer-rel row that has at least one match is
     * nselec * inner_rows / jselec.
     *
     * Note: it is correct to use the inner rel's "rows" count here, not
     * PATH_ROWS(), even if the inner path under consideration is an inner
     * indexscan.  This is because we have included all the join clauses in
     * the selectivity estimate, even ones used in an inner indexscan.
     */
    if (jselec > 0)             /* protect against zero divide */
    {
        avgmatch = nselec * path->innerjoinpath->parent->rows / jselec;
        /* Clamp to sane range */
        avgmatch = Max(1.0, avgmatch);
    }
    else
        avgmatch = 1.0;

    *outer_match_frac = jselec;
    *match_count = avgmatch;
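    /*
     * For example, if 20% of the outer rows have a match (jselec = 0.2), the
     * inner rel has 1000 rows, and the inner-join selectivity is 0.001, then
     * avgmatch = 0.001 * 1000 / 0.2 = 5 matches per matched outer row.
     */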
    /*
     * If requested, check whether the inner path uses all the joinquals as
     * indexquals.  (If that's true, we can assume that an unmatched outer
     * tuple is cheap to process, whereas otherwise it's probably expensive.)
     */
    if (indexed_join_quals)
    {
        List       *nrclauses;

        nrclauses = select_nonredundant_join_clauses(root,
                                                     path->joinrestrictinfo,
                                                     path->innerjoinpath);
        *indexed_join_quals = (nrclauses == NIL);
    }

    return true;
}
/*
 * approx_tuple_count
 *      Quick-and-dirty estimation of the number of join rows passing
 *      a set of qual conditions.
 *
 * The quals can be either an implicitly-ANDed list of boolean expressions,
 * or a list of RestrictInfo nodes (typically the latter).
 *
 * We intentionally compute the selectivity under JOIN_INNER rules, even
 * if it's some type of outer join.  This is appropriate because we are
 * trying to figure out how many tuples pass the initial merge or hash
 * join step.
 *
 * This is quick-and-dirty because we bypass clauselist_selectivity, and
 * simply multiply the independent clause selectivities together.  Now
 * clauselist_selectivity often can't do any better than that anyhow, but
 * for some situations (such as range constraints) it is smarter.  However,
 * we can't effectively cache the results of clauselist_selectivity, whereas
 * the individual clause selectivities can be and are cached.
 *
 * Since we are only using the results to estimate how many potential
 * output tuples are generated and passed through qpqual checking, it
 * seems OK to live with the approximation.
 */
static double
approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
{
    double      tuples;
    double      outer_tuples = path->outerjoinpath->parent->rows;
    double      inner_tuples = path->innerjoinpath->parent->rows;
    SpecialJoinInfo sjinfo;
    Selectivity selec = 1.0;
    ListCell   *l;

    /*
     * Make up a SpecialJoinInfo for JOIN_INNER semantics.
     */
    sjinfo.type = T_SpecialJoinInfo;
    sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
    sjinfo.min_righthand = path->innerjoinpath->parent->relids;
    sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
    sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
    sjinfo.jointype = JOIN_INNER;
    /* we don't bother trying to make the remaining fields valid */
    sjinfo.lhs_strict = false;
    sjinfo.delay_upper_joins = false;
    sjinfo.join_quals = NIL;

    /* Get the approximate selectivity */
    foreach(l, quals)
    {
        Node       *qual = (Node *) lfirst(l);

        /* Note that clause_selectivity will be able to cache its result */
        selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
    }

    /* Apply it to the input relation sizes */
    tuples = selec * outer_tuples * inner_tuples;
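    /*
     * For example, a combined clause selectivity of 0.001 over a 1000-row
     * outer rel and a 10000-row inner rel yields an estimate of about 10000
     * joined tuples.
     */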
    return clamp_row_est(tuples);
}
/*
 * set_baserel_size_estimates
 *      Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *  rows: the estimated number of output tuples (after applying
 *        restriction clauses).
 *  width: the estimated average output tuple width in bytes.
 *  baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    double      nrows;

    /* Should only be applied to base relations */
    Assert(rel->relid > 0);

    nrows = rel->tuples *
        clauselist_selectivity(root,
                               rel->baserestrictinfo,
                               0,
                               JOIN_INNER,
                               NULL);

    rel->rows = clamp_row_est(nrows);
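    /*
     * E.g., a relation with 1,000,000 heap tuples whose baserestrictinfo
     * clauses have a combined selectivity of 0.05 gets rows = 50000.
     */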
    cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);

    set_rel_width(root, rel);
}
/*
 * set_joinrel_size_estimates
 *      Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * We set only the rows field here.  The width field was already set by
 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
 */
void
set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
                           RelOptInfo *outer_rel,
                           RelOptInfo *inner_rel,
                           SpecialJoinInfo *sjinfo,
                           List *restrictlist)
{
    JoinType    jointype = sjinfo->jointype;
    Selectivity jselec;
    Selectivity pselec;
    double      nrows;

    /*
     * Compute joinclause selectivity.  Note that we are only considering
     * clauses that become restriction clauses at this join level; we are not
     * double-counting them because they were not considered in estimating the
     * sizes of the component rels.
     *
     * For an outer join, we have to distinguish the selectivity of the join's
     * own clauses (JOIN/ON conditions) from any clauses that were "pushed
     * down".  For inner joins we just count them all as joinclauses.
     */
    if (IS_OUTER_JOIN(jointype))
    {
        List       *joinquals = NIL;
        List       *pushedquals = NIL;
        ListCell   *l;

        /* Grovel through the clauses to separate into two lists */
        foreach(l, restrictlist)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

            Assert(IsA(rinfo, RestrictInfo));
            if (rinfo->is_pushed_down)
                pushedquals = lappend(pushedquals, rinfo);
            else
                joinquals = lappend(joinquals, rinfo);
        }

        /* Get the separate selectivities */
        jselec = clauselist_selectivity(root,
                                        joinquals,
                                        0,
                                        jointype,
                                        sjinfo);
        pselec = clauselist_selectivity(root,
                                        pushedquals,
                                        0,
                                        jointype,
                                        sjinfo);

        /* Avoid leaking a lot of ListCells */
        list_free(joinquals);
        list_free(pushedquals);
    }
    else
    {
        jselec = clauselist_selectivity(root,
                                        restrictlist,
                                        0,
                                        jointype,
                                        sjinfo);
        pselec = 0.0;           /* not used, keep compiler quiet */
    }

    /*
     * Basically, we multiply size of Cartesian product by selectivity.
     *
     * If we are doing an outer join, take that into account: the joinqual
     * selectivity has to be clamped using the knowledge that the output must
     * be at least as large as the non-nullable input.  However, any
     * pushed-down quals are applied after the outer join, so their
     * selectivity applies fully.
     *
     * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
     * of LHS rows that have matches, and we apply that straightforwardly.
     */
    switch (jointype)
    {
        case JOIN_INNER:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            break;
        case JOIN_LEFT:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
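            /*
             * E.g., 1000 outer rows joined to 10 inner rows with jselec =
             * 0.01 would give only 100 rows, but a left join must emit every
             * outer row at least once, so the estimate is raised to 1000
             * before the pushed-down-qual selectivity is applied.
             */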
            nrows *= pselec;
            break;
        case JOIN_FULL:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
            if (nrows < inner_rel->rows)
                nrows = inner_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_SEMI:
            nrows = outer_rel->rows * jselec;
            /* pselec not used */
            break;
        case JOIN_ANTI:
            nrows = outer_rel->rows * (1.0 - jselec);
            nrows *= pselec;
            break;
        default:
            /* other values not expected here */
            elog(ERROR, "unrecognized join type: %d", (int) jointype);
            nrows = 0;          /* keep compiler quiet */
            break;
    }

    rel->rows = clamp_row_est(nrows);
}
/*
 * set_function_size_estimates
 *      Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are functions */
    Assert(rel->relid > 0);
    rte = planner_rt_fetch(rel->relid, root);
    Assert(rte->rtekind == RTE_FUNCTION);

    /* Estimate number of rows the function itself will return */
    rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
/*
 * set_values_size_estimates
 *      Set the size estimates for a base relation that is a values list.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are values lists */
    Assert(rel->relid > 0);
    rte = planner_rt_fetch(rel->relid, root);
    Assert(rte->rtekind == RTE_VALUES);

    /*
     * Estimate number of rows the values list will return.  We know this
     * precisely based on the list length (well, barring set-returning
     * functions in list items, but that's a refinement not catered for
     * anywhere else either).
     */
    rel->tuples = list_length(rte->values_lists);

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
/*
 * set_cte_size_estimates
 *      Set the size estimates for a base relation that is a CTE reference.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already, and we need the completed plan for the CTE (if a regular CTE)
 * or the non-recursive term (if a self-reference).
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are CTE references */
    Assert(rel->relid > 0);
    rte = planner_rt_fetch(rel->relid, root);
    Assert(rte->rtekind == RTE_CTE);

    if (rte->self_reference)
    {
        /*
         * In a self-reference, arbitrarily assume the average worktable size
         * is about 10 times the nonrecursive term's size.
         */
        rel->tuples = 10 * cteplan->plan_rows;
    }
    else
    {
        /* Otherwise just believe the CTE plan's output estimate */
        rel->tuples = cteplan->plan_rows;
    }

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
/*
 * set_rel_width
 *      Set the estimated output width of a base relation.
 *
 * NB: this works best on plain relations because it prefers to look at
 * real Vars.  It will fail to make use of pg_statistic info when applied
 * to a subquery relation, even if the subquery outputs are simple vars
 * that we could have gotten info for.  Is it worth trying to be smarter
 * about subqueries?
 *
 * The per-attribute width estimates are cached for possible re-use while
 * building join relations.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
    Oid         reloid = planner_rt_fetch(rel->relid, root)->relid;
    int32       tuple_width = 0;
    ListCell   *lc;

    foreach(lc, rel->reltargetlist)
    {
        Node       *node = (Node *) lfirst(lc);

        if (IsA(node, Var))
        {
            Var        *var = (Var *) node;
            int         ndx;
            int32       item_width;

            Assert(var->varno == rel->relid);
            Assert(var->varattno >= rel->min_attr);
            Assert(var->varattno <= rel->max_attr);

            ndx = var->varattno - rel->min_attr;

            /*
             * The width probably hasn't been cached yet, but may as well
             * check
             */
            if (rel->attr_widths[ndx] > 0)
            {
                tuple_width += rel->attr_widths[ndx];
                continue;
            }

            /* Try to get column width from statistics */
            if (reloid != InvalidOid)
            {
                item_width = get_attavgwidth(reloid, var->varattno);
                if (item_width > 0)
                {
                    rel->attr_widths[ndx] = item_width;
                    tuple_width += item_width;
                    continue;
                }
            }

            /*
             * Not a plain relation, or can't find statistics for it. Estimate
             * using just the type info.
             */
            item_width = get_typavgwidth(var->vartype, var->vartypmod);
            Assert(item_width > 0);
            rel->attr_widths[ndx] = item_width;
            tuple_width += item_width;
        }
        else if (IsA(node, PlaceHolderVar))
        {
            PlaceHolderVar *phv = (PlaceHolderVar *) node;
            PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);

            tuple_width += phinfo->ph_width;
        }
        else
        {
            /* For now, punt on whole-row child Vars */
            tuple_width += 32;  /* arbitrary */
        }
    }
    Assert(tuple_width >= 0);
    rel->width = tuple_width;
}
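/*
 * Example: a relation whose targetlist is an int4, a float8, and a text
 * column averaging 32 bytes gets width = 4 + 8 + 32 = 44.
 */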
/*
 * relation_byte_size
 *    Estimate the storage space in bytes for a given number of tuples
 *    of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
    return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
}
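/*
 * For example, on a typical 64-bit build MAXALIGN(100) = 104 and the aligned
 * tuple header is 24 bytes, so a million 100-byte-wide tuples are estimated
 * at about 128 million bytes (roughly 122MB).
 */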
/*
 * page_size
 *    Returns an estimate of the number of pages covered by a given
 *    number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
    return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
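/*
 * Continuing the example above: with the default 8192-byte BLCKSZ, a million
 * 128-byte tuples come out to ceil(128000000 / 8192) = 15625 pages.
 */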