/*-------------------------------------------------------------------------
 *
 * analyze.c
 *	  the Postgres statistics generator
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "access/heapam.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
#include "catalog/pg_namespace.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
#include "utils/tqual.h"
/* Data structure for Algorithm S from Knuth 3.4.2 */
typedef struct
{
	BlockNumber N;				/* number of blocks, known in advance */
	int			n;				/* desired sample size */
	BlockNumber t;				/* current block number */
	int			m;				/* blocks selected so far */
} BlockSamplerData;

typedef BlockSamplerData *BlockSampler;
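/*
 * Typical usage of the sampler (a sketch for orientation only; the actual
 * driver is acquire_sample_rows() below):
 *
 *		BlockSamplerData bs;
 *
 *		BlockSampler_Init(&bs, nblocks, samplesize);
 *		while (BlockSampler_HasMore(&bs))
 *			process_block(BlockSampler_Next(&bs));
 *
 * where process_block() stands in for whatever per-block work the caller
 * does (here, scanning the block's tuples into the row sample).
 */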
/* Per-index data for ANALYZE */
typedef struct AnlIndexData
{
	IndexInfo  *indexInfo;		/* BuildIndexInfo result */
	double		tupleFract;		/* fraction of rows for partial index */
	VacAttrStats **vacattrstats;	/* index attrs to analyze */
	int			attr_cnt;		/* number of entries in vacattrstats */
} AnlIndexData;
/* Default statistics target (GUC parameter) */
int			default_statistics_target = 100;

/* A few variables that don't seem worth passing around as parameters */
static int	elevel = -1;

static MemoryContext anl_context = NULL;

static BufferAccessStrategy vac_strategy;
static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
				  int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
					AnlIndexData *indexdata, int nindexes,
					HeapTuple *rows, int numrows,
					MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int	acquire_sample_rows(Relation onerel, HeapTuple *rows,
					int targrows, double *totalrows, double *totaldeadrows);
static double random_fract(void);
static double init_selection_state(int n);
static double get_next_S(double t, int n, double *stateptr);
static int	compare_rows(const void *a, const void *b);
static void update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);

static bool std_typanalyze(VacAttrStats *stats);
/*
 *	analyze_rel() -- analyze one relation
 *
 * If update_reltuples is true, we update reltuples and relpages columns
 * in pg_class.  Caller should pass false if we're part of VACUUM ANALYZE,
 * and the VACUUM didn't skip any pages.  We only have an approximate count,
 * so we don't want to overwrite the accurate values already inserted by the
 * VACUUM in that case.  VACUUM always scans all indexes, however, so the
 * pg_class entries for indexes are never updated if we're part of VACUUM
 * ANALYZE.
 */
void
analyze_rel(Oid relid, VacuumStmt *vacstmt,
			BufferAccessStrategy bstrategy, bool update_reltuples)
{
	bool		analyzableindex;
	VacAttrStats **vacattrstats;
	AnlIndexData *indexdata;
	TimestampTz starttime = 0;
	if (vacstmt->verbose)
		elevel = INFO;
	else
		elevel = DEBUG2;

	vac_strategy = bstrategy;
	/*
	 * Use the current context for storing analysis info.  vacuum.c ensures
	 * that this context will be cleared when I return, thus releasing the
	 * memory allocated here.
	 */
	anl_context = CurrentMemoryContext;

	/*
	 * Check for user-requested abort.  Note we want this to be inside a
	 * transaction, so xact.c doesn't issue useless WARNING.
	 */
	CHECK_FOR_INTERRUPTS();
	/*
	 * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
	 * ANALYZEs don't run on it concurrently.  (This also locks out a
	 * concurrent VACUUM, which doesn't matter much at the moment but might
	 * matter if we ever try to accumulate stats on dead tuples.)  If the rel
	 * has been dropped since we last saw it, we don't need to process it.
	 */
	onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
	if (!onerel)
		return;
170 * Check permissions --- this should match vacuum's check!
172 if (!(pg_class_ownercheck(RelationGetRelid(onerel
), GetUserId()) ||
173 (pg_database_ownercheck(MyDatabaseId
, GetUserId()) && !onerel
->rd_rel
->relisshared
)))
175 /* No need for a WARNING if we already complained during VACUUM */
176 if (!vacstmt
->vacuum
)
178 if (onerel
->rd_rel
->relisshared
)
180 (errmsg("skipping \"%s\" --- only superuser can analyze it",
181 RelationGetRelationName(onerel
))));
182 else if (onerel
->rd_rel
->relnamespace
== PG_CATALOG_NAMESPACE
)
184 (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
185 RelationGetRelationName(onerel
))));
188 (errmsg("skipping \"%s\" --- only table or database owner can analyze it",
189 RelationGetRelationName(onerel
))));
191 relation_close(onerel
, ShareUpdateExclusiveLock
);
196 * Check that it's a plain table; we used to do this in get_rel_oids() but
197 * seems safer to check after we've locked the relation.
199 if (onerel
->rd_rel
->relkind
!= RELKIND_RELATION
)
201 /* No need for a WARNING if we already complained during VACUUM */
202 if (!vacstmt
->vacuum
)
204 (errmsg("skipping \"%s\" --- cannot analyze indexes, views, or special system tables",
205 RelationGetRelationName(onerel
))));
206 relation_close(onerel
, ShareUpdateExclusiveLock
);
211 * Silently ignore tables that are temp tables of other backends ---
212 * trying to analyze these is rather pointless, since their contents are
213 * probably not up-to-date on disk. (We don't throw a warning here; it
214 * would just lead to chatter during a database-wide ANALYZE.)
216 if (RELATION_IS_OTHER_TEMP(onerel
))
218 relation_close(onerel
, ShareUpdateExclusiveLock
);
223 * We can ANALYZE any table except pg_statistic. See update_attstats
225 if (RelationGetRelid(onerel
) == StatisticRelationId
)
227 relation_close(onerel
, ShareUpdateExclusiveLock
);
232 (errmsg("analyzing \"%s.%s\"",
233 get_namespace_name(RelationGetNamespace(onerel
)),
234 RelationGetRelationName(onerel
))));
237 * Switch to the table owner's userid, so that any index functions are run
240 GetUserIdAndContext(&save_userid
, &save_secdefcxt
);
241 SetUserIdAndContext(onerel
->rd_rel
->relowner
, true);
243 /* let others know what I'm doing */
244 LWLockAcquire(ProcArrayLock
, LW_EXCLUSIVE
);
245 MyProc
->vacuumFlags
|= PROC_IN_ANALYZE
;
246 LWLockRelease(ProcArrayLock
);
248 /* measure elapsed time iff autovacuum logging requires it */
249 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration
>= 0)
251 pg_rusage_init(&ru0
);
252 if (Log_autovacuum_min_duration
> 0)
253 starttime
= GetCurrentTimestamp();
257 * Determine which columns to analyze
259 * Note that system attributes are never analyzed.
261 if (vacstmt
->va_cols
!= NIL
)
265 vacattrstats
= (VacAttrStats
**) palloc(list_length(vacstmt
->va_cols
) *
266 sizeof(VacAttrStats
*));
268 foreach(le
, vacstmt
->va_cols
)
270 char *col
= strVal(lfirst(le
));
272 i
= attnameAttNum(onerel
, col
, false);
273 if (i
== InvalidAttrNumber
)
275 (errcode(ERRCODE_UNDEFINED_COLUMN
),
276 errmsg("column \"%s\" of relation \"%s\" does not exist",
277 col
, RelationGetRelationName(onerel
))));
278 vacattrstats
[tcnt
] = examine_attribute(onerel
, i
);
279 if (vacattrstats
[tcnt
] != NULL
)
286 attr_cnt
= onerel
->rd_att
->natts
;
287 vacattrstats
= (VacAttrStats
**)
288 palloc(attr_cnt
* sizeof(VacAttrStats
*));
290 for (i
= 1; i
<= attr_cnt
; i
++)
292 vacattrstats
[tcnt
] = examine_attribute(onerel
, i
);
293 if (vacattrstats
[tcnt
] != NULL
)
300 * Open all indexes of the relation, and see if there are any analyzable
301 * columns in the indexes. We do not analyze index columns if there was
302 * an explicit column list in the ANALYZE command, however.
304 vac_open_indexes(onerel
, AccessShareLock
, &nindexes
, &Irel
);
305 hasindex
= (nindexes
> 0);
307 analyzableindex
= false;
310 indexdata
= (AnlIndexData
*) palloc0(nindexes
* sizeof(AnlIndexData
));
311 for (ind
= 0; ind
< nindexes
; ind
++)
313 AnlIndexData
*thisdata
= &indexdata
[ind
];
314 IndexInfo
*indexInfo
;
316 thisdata
->indexInfo
= indexInfo
= BuildIndexInfo(Irel
[ind
]);
317 thisdata
->tupleFract
= 1.0; /* fix later if partial */
318 if (indexInfo
->ii_Expressions
!= NIL
&& vacstmt
->va_cols
== NIL
)
320 ListCell
*indexpr_item
= list_head(indexInfo
->ii_Expressions
);
322 thisdata
->vacattrstats
= (VacAttrStats
**)
323 palloc(indexInfo
->ii_NumIndexAttrs
* sizeof(VacAttrStats
*));
325 for (i
= 0; i
< indexInfo
->ii_NumIndexAttrs
; i
++)
327 int keycol
= indexInfo
->ii_KeyAttrNumbers
[i
];
331 /* Found an index expression */
334 if (indexpr_item
== NULL
) /* shouldn't happen */
335 elog(ERROR
, "too few entries in indexprs list");
336 indexkey
= (Node
*) lfirst(indexpr_item
);
337 indexpr_item
= lnext(indexpr_item
);
340 * Can't analyze if the opclass uses a storage type
341 * different from the expression result type. We'd get
342 * confused because the type shown in pg_attribute for
343 * the index column doesn't match what we are getting
344 * from the expression. Perhaps this can be fixed
345 * someday, but for now, punt.
347 if (exprType(indexkey
) !=
348 Irel
[ind
]->rd_att
->attrs
[i
]->atttypid
)
351 thisdata
->vacattrstats
[tcnt
] =
352 examine_attribute(Irel
[ind
], i
+ 1);
353 if (thisdata
->vacattrstats
[tcnt
] != NULL
)
356 analyzableindex
= true;
360 thisdata
->attr_cnt
= tcnt
;
366 * Quit if no analyzable columns and no pg_class update needed.
368 if (attr_cnt
<= 0 && !analyzableindex
&& !update_reltuples
)
372 * Determine how many rows we need to sample, using the worst case from
373 * all analyzable columns. We use a lower bound of 100 rows to avoid
374 * possible overflow in Vitter's algorithm.
377 for (i
= 0; i
< attr_cnt
; i
++)
379 if (targrows
< vacattrstats
[i
]->minrows
)
380 targrows
= vacattrstats
[i
]->minrows
;
382 for (ind
= 0; ind
< nindexes
; ind
++)
384 AnlIndexData
*thisdata
= &indexdata
[ind
];
386 for (i
= 0; i
< thisdata
->attr_cnt
; i
++)
388 if (targrows
< thisdata
->vacattrstats
[i
]->minrows
)
389 targrows
= thisdata
->vacattrstats
[i
]->minrows
;
394 * Acquire the sample rows
396 rows
= (HeapTuple
*) palloc(targrows
* sizeof(HeapTuple
));
397 numrows
= acquire_sample_rows(onerel
, rows
, targrows
,
398 &totalrows
, &totaldeadrows
);
401 * Compute the statistics. Temporary results during the calculations for
402 * each column are stored in a child context. The calc routines are
403 * responsible to make sure that whatever they store into the VacAttrStats
404 * structure is allocated in anl_context.
408 MemoryContext col_context
,
411 col_context
= AllocSetContextCreate(anl_context
,
413 ALLOCSET_DEFAULT_MINSIZE
,
414 ALLOCSET_DEFAULT_INITSIZE
,
415 ALLOCSET_DEFAULT_MAXSIZE
);
416 old_context
= MemoryContextSwitchTo(col_context
);
418 for (i
= 0; i
< attr_cnt
; i
++)
420 VacAttrStats
*stats
= vacattrstats
[i
];
423 stats
->tupDesc
= onerel
->rd_att
;
424 (*stats
->compute_stats
) (stats
,
428 MemoryContextResetAndDeleteChildren(col_context
);
432 compute_index_stats(onerel
, totalrows
,
437 MemoryContextSwitchTo(old_context
);
438 MemoryContextDelete(col_context
);
441 * Emit the completed stats rows into pg_statistic, replacing any
442 * previous statistics for the target columns. (If there are stats in
443 * pg_statistic for columns we didn't process, we leave them alone.)
445 update_attstats(relid
, attr_cnt
, vacattrstats
);
447 for (ind
= 0; ind
< nindexes
; ind
++)
449 AnlIndexData
*thisdata
= &indexdata
[ind
];
451 update_attstats(RelationGetRelid(Irel
[ind
]),
452 thisdata
->attr_cnt
, thisdata
->vacattrstats
);
457 * Update pages/tuples stats in pg_class.
459 if (update_reltuples
)
461 vac_update_relstats(onerel
,
462 RelationGetNumberOfBlocks(onerel
),
463 totalrows
, hasindex
, InvalidTransactionId
);
464 /* report results to the stats collector, too */
465 pgstat_report_analyze(onerel
, totalrows
, totaldeadrows
);
469 * Same for indexes. Vacuum always scans all indexes, so if we're part of
470 * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
473 if (!vacstmt
->vacuum
)
475 for (ind
= 0; ind
< nindexes
; ind
++)
477 AnlIndexData
*thisdata
= &indexdata
[ind
];
478 double totalindexrows
;
480 totalindexrows
= ceil(thisdata
->tupleFract
* totalrows
);
481 vac_update_relstats(Irel
[ind
],
482 RelationGetNumberOfBlocks(Irel
[ind
]),
483 totalindexrows
, false, InvalidTransactionId
);
487 /* We skip to here if there were no analyzable columns */
490 /* If this isn't part of VACUUM ANALYZE, let index AMs do cleanup */
491 if (!vacstmt
->vacuum
)
493 for (ind
= 0; ind
< nindexes
; ind
++)
495 IndexBulkDeleteResult
*stats
;
496 IndexVacuumInfo ivinfo
;
498 ivinfo
.index
= Irel
[ind
];
499 ivinfo
.vacuum_full
= false;
500 ivinfo
.analyze_only
= true;
501 ivinfo
.estimated_count
= true;
502 ivinfo
.message_level
= elevel
;
503 ivinfo
.num_heap_tuples
= onerel
->rd_rel
->reltuples
;
504 ivinfo
.strategy
= vac_strategy
;
506 stats
= index_vacuum_cleanup(&ivinfo
, NULL
);
513 /* Done with indexes */
514 vac_close_indexes(nindexes
, Irel
, NoLock
);
517 * Close source relation now, but keep lock so that no one deletes it
518 * before we commit. (If someone did, they'd fail to clean up the entries
519 * we made in pg_statistic. Also, releasing the lock before commit would
520 * expose us to concurrent-update failures in update_attstats.)
522 relation_close(onerel
, NoLock
);
524 /* Log the action if appropriate */
525 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration
>= 0)
527 if (Log_autovacuum_min_duration
== 0 ||
528 TimestampDifferenceExceeds(starttime
, GetCurrentTimestamp(),
529 Log_autovacuum_min_duration
))
531 (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s",
532 get_database_name(MyDatabaseId
),
533 get_namespace_name(RelationGetNamespace(onerel
)),
534 RelationGetRelationName(onerel
),
535 pg_rusage_show(&ru0
))));
539 * Reset my PGPROC flag. Note: we need this here, and not in vacuum_rel,
540 * because the vacuum flag is cleared by the end-of-xact code.
542 LWLockAcquire(ProcArrayLock
, LW_EXCLUSIVE
);
543 MyProc
->vacuumFlags
&= ~PROC_IN_ANALYZE
;
544 LWLockRelease(ProcArrayLock
);
547 SetUserIdAndContext(save_userid
, save_secdefcxt
);
551 * Compute statistics about indexes of a relation
554 compute_index_stats(Relation onerel
, double totalrows
,
555 AnlIndexData
*indexdata
, int nindexes
,
556 HeapTuple
*rows
, int numrows
,
557 MemoryContext col_context
)
559 MemoryContext ind_context
,
561 Datum values
[INDEX_MAX_KEYS
];
562 bool isnull
[INDEX_MAX_KEYS
];
566 ind_context
= AllocSetContextCreate(anl_context
,
568 ALLOCSET_DEFAULT_MINSIZE
,
569 ALLOCSET_DEFAULT_INITSIZE
,
570 ALLOCSET_DEFAULT_MAXSIZE
);
571 old_context
= MemoryContextSwitchTo(ind_context
);
573 for (ind
= 0; ind
< nindexes
; ind
++)
575 AnlIndexData
*thisdata
= &indexdata
[ind
];
576 IndexInfo
*indexInfo
= thisdata
->indexInfo
;
577 int attr_cnt
= thisdata
->attr_cnt
;
578 TupleTableSlot
*slot
;
580 ExprContext
*econtext
;
587 double totalindexrows
;
589 /* Ignore index if no columns to analyze and not partial */
590 if (attr_cnt
== 0 && indexInfo
->ii_Predicate
== NIL
)
594 * Need an EState for evaluation of index expressions and
595 * partial-index predicates. Create it in the per-index context to be
596 * sure it gets cleaned up at the bottom of the loop.
598 estate
= CreateExecutorState();
599 econtext
= GetPerTupleExprContext(estate
);
600 /* Need a slot to hold the current heap tuple, too */
601 slot
= MakeSingleTupleTableSlot(RelationGetDescr(onerel
));
603 /* Arrange for econtext's scan tuple to be the tuple under test */
604 econtext
->ecxt_scantuple
= slot
;
606 /* Set up execution state for predicate. */
608 ExecPrepareExpr((Expr
*) indexInfo
->ii_Predicate
,
611 /* Compute and save index expression values */
612 exprvals
= (Datum
*) palloc(numrows
* attr_cnt
* sizeof(Datum
));
613 exprnulls
= (bool *) palloc(numrows
* attr_cnt
* sizeof(bool));
616 for (rowno
= 0; rowno
< numrows
; rowno
++)
618 HeapTuple heapTuple
= rows
[rowno
];
620 /* Set up for predicate or expression evaluation */
621 ExecStoreTuple(heapTuple
, slot
, InvalidBuffer
, false);
623 /* If index is partial, check predicate */
624 if (predicate
!= NIL
)
626 if (!ExecQual(predicate
, econtext
, false))
634 * Evaluate the index row to compute expression values. We
635 * could do this by hand, but FormIndexDatum is convenient.
637 FormIndexDatum(indexInfo
,
644 * Save just the columns we care about.
646 for (i
= 0; i
< attr_cnt
; i
++)
648 VacAttrStats
*stats
= thisdata
->vacattrstats
[i
];
649 int attnum
= stats
->attr
->attnum
;
651 exprvals
[tcnt
] = values
[attnum
- 1];
652 exprnulls
[tcnt
] = isnull
[attnum
- 1];
659 * Having counted the number of rows that pass the predicate in the
660 * sample, we can estimate the total number of rows in the index.
662 thisdata
->tupleFract
= (double) numindexrows
/ (double) numrows
;
663 totalindexrows
= ceil(thisdata
->tupleFract
* totalrows
);
666 * Now we can compute the statistics for the expression columns.
668 if (numindexrows
> 0)
670 MemoryContextSwitchTo(col_context
);
671 for (i
= 0; i
< attr_cnt
; i
++)
673 VacAttrStats
*stats
= thisdata
->vacattrstats
[i
];
675 stats
->exprvals
= exprvals
+ i
;
676 stats
->exprnulls
= exprnulls
+ i
;
677 stats
->rowstride
= attr_cnt
;
678 (*stats
->compute_stats
) (stats
,
682 MemoryContextResetAndDeleteChildren(col_context
);
687 MemoryContextSwitchTo(ind_context
);
689 ExecDropSingleTupleTableSlot(slot
);
690 FreeExecutorState(estate
);
691 MemoryContextResetAndDeleteChildren(ind_context
);
694 MemoryContextSwitchTo(old_context
);
695 MemoryContextDelete(ind_context
);
699 * examine_attribute -- pre-analysis of a single column
701 * Determine whether the column is analyzable; if so, create and initialize
702 * a VacAttrStats struct for it. If not, return NULL.
704 static VacAttrStats
*
705 examine_attribute(Relation onerel
, int attnum
)
707 Form_pg_attribute attr
= onerel
->rd_att
->attrs
[attnum
- 1];
713 /* Never analyze dropped columns */
714 if (attr
->attisdropped
)
717 /* Don't analyze column if user has specified not to */
718 if (attr
->attstattarget
== 0)
722 * Create the VacAttrStats struct. Note that we only have a copy of the
723 * fixed fields of the pg_attribute tuple.
725 stats
= (VacAttrStats
*) palloc0(sizeof(VacAttrStats
));
726 stats
->attr
= (Form_pg_attribute
) palloc(ATTRIBUTE_FIXED_PART_SIZE
);
727 memcpy(stats
->attr
, attr
, ATTRIBUTE_FIXED_PART_SIZE
);
728 typtuple
= SearchSysCache(TYPEOID
,
729 ObjectIdGetDatum(attr
->atttypid
),
731 if (!HeapTupleIsValid(typtuple
))
732 elog(ERROR
, "cache lookup failed for type %u", attr
->atttypid
);
733 stats
->attrtype
= (Form_pg_type
) palloc(sizeof(FormData_pg_type
));
734 memcpy(stats
->attrtype
, GETSTRUCT(typtuple
), sizeof(FormData_pg_type
));
735 ReleaseSysCache(typtuple
);
736 stats
->anl_context
= anl_context
;
737 stats
->tupattnum
= attnum
;
740 * The fields describing the stats->stavalues[n] element types default to
741 * the type of the field being analyzed, but the type-specific typanalyze
742 * function can change them if it wants to store something else.
744 for (i
= 0; i
< STATISTIC_NUM_SLOTS
; i
++)
746 stats
->statypid
[i
] = stats
->attr
->atttypid
;
747 stats
->statyplen
[i
] = stats
->attrtype
->typlen
;
748 stats
->statypbyval
[i
] = stats
->attrtype
->typbyval
;
749 stats
->statypalign
[i
] = stats
->attrtype
->typalign
;
753 * Call the type-specific typanalyze function. If none is specified, use
756 if (OidIsValid(stats
->attrtype
->typanalyze
))
757 ok
= DatumGetBool(OidFunctionCall1(stats
->attrtype
->typanalyze
,
758 PointerGetDatum(stats
)));
760 ok
= std_typanalyze(stats
);
762 if (!ok
|| stats
->compute_stats
== NULL
|| stats
->minrows
<= 0)
764 pfree(stats
->attrtype
);
/*
 * BlockSampler_Init -- prepare for random sampling of blocknumbers
 *
 * BlockSampler is used for stage one of our new two-stage tuple
 * sampling mechanism as discussed on pgsql-hackers 2004-04-02 (subject
 * "Large DB").  It selects a random sample of samplesize blocks out of
 * the nblocks blocks in the table.  If the table has less than
 * samplesize blocks, all blocks are selected.
 *
 * Since we know the total number of blocks in advance, we can use the
 * straightforward Algorithm S from Knuth 3.4.2, rather than Vitter's
 * algorithm.
 */
static void
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
	bs->N = nblocks;			/* measured table size */

	/*
	 * If we decide to reduce samplesize for tables that have less or not
	 * much more than samplesize blocks, here is the place to do it.
	 */
	bs->n = samplesize;
	bs->t = 0;					/* blocks scanned so far */
	bs->m = 0;					/* blocks selected so far */
}
static bool
BlockSampler_HasMore(BlockSampler bs)
{
	return (bs->t < bs->N) && (bs->m < bs->n);
}
static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
	BlockNumber K = bs->N - bs->t;	/* remaining blocks */
	int			k = bs->n - bs->m;	/* blocks still to sample */
	double		p;				/* probability to skip block */
	double		V;				/* random */

	Assert(BlockSampler_HasMore(bs));	/* hence K > 0 and k > 0 */

	if ((BlockNumber) k >= K)
	{
		/* need all the rest */
		bs->m++;
		return bs->t++;
	}
	/*----------
	 * It is not obvious that this code matches Knuth's Algorithm S.
	 * Knuth says to skip the current block with probability 1 - k/K.
	 * If we are to skip, we should advance t (hence decrease K), and
	 * repeat the same probabilistic test for the next block.  The naive
	 * implementation thus requires a random_fract() call for each block
	 * number.  But we can reduce this to one random_fract() call per
	 * selected block, by noting that each time the while-test succeeds,
	 * we can reinterpret V as a uniform random number in the range 0 to p.
	 * Therefore, instead of choosing a new V, we just adjust p to be
	 * the appropriate fraction of its former value, and our next loop
	 * makes the appropriate probabilistic test.
	 *
	 * We have initially K > k > 0.  If the loop reduces K to equal k,
	 * the next while-test must fail since p will become exactly zero
	 * (we assume there will not be roundoff error in the division).
	 * (Note: Knuth suggests a "<=" loop condition, but we use "<" just
	 * to be doubly sure about roundoff error.)  Therefore K cannot become
	 * less than k, which means that we cannot fail to select enough blocks.
	 *----------
	 */
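	/*
	 * Worked example (illustrative numbers, not from the original comment):
	 * with K = 1000 blocks remaining and k = 100 still to sample, the
	 * current block is skipped with probability p = 1 - 100/1000 = 0.9.
	 * Each skip decreases K while k stays fixed, so the skip probability
	 * shrinks; once K has fallen to k it reaches zero and every remaining
	 * block is selected, which is why the sampler can never come up short.
	 */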
	V = random_fract();
	p = 1.0 - (double) k / (double) K;
	while (V < p)
	{
		/* skip */
		bs->t++;
		K--;					/* keep K == N - t */

		/* adjust p to be new cutoff point in reduced range */
		p *= 1.0 - (double) k / (double) K;
	}

	/* select */
	bs->m++;
	return bs->t++;
}
/*
 * acquire_sample_rows -- acquire a random sample of rows from the table
 *
 * As of May 2004 we use a new two-stage method:  Stage one selects up
 * to targrows random blocks (or all blocks, if there aren't so many).
 * Stage two scans these blocks and uses the Vitter algorithm to create
 * a random sample of targrows rows (or less, if there are less in the
 * sample of blocks).  The two stages are executed simultaneously: each
 * block is processed as soon as stage one returns its number and while
 * the rows are read stage two controls which ones are to be inserted
 * into the sample.
 *
 * Although every row has an equal chance of ending up in the final
 * sample, this sampling method is not perfect: not every possible
 * sample has an equal chance of being selected.  For large relations
 * the number of different blocks represented by the sample tends to be
 * too small.  We can live with that for now.  Improvements are welcome.
 *
 * We also estimate the total numbers of live and dead rows in the table,
 * and return them into *totalrows and *totaldeadrows, respectively.
 *
 * An important property of this sampling method is that because we do
 * look at a statistically unbiased set of blocks, we should get
 * unbiased estimates of the average numbers of live and dead rows per
 * block.  The previous sampling method put too much credence in the row
 * density near the start of the table.
 *
 * The returned list of tuples is in order by physical position in the table.
 * (We will rely on this later to derive correlation estimates.)
 */
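/*
 * Rough sketch of the flow below (orientation only; the code that follows
 * is authoritative):
 *
 *		BlockSampler_Init(&bs, totalblocks, targrows);
 *		rstate = init_selection_state(targrows);
 *		while (BlockSampler_HasMore(&bs))
 *			read block BlockSampler_Next(&bs) and offer each live tuple
 *			to the Vitter reservoir of at most targrows rows;
 *
 * A table with no more than targrows blocks is therefore read in full,
 * while a larger table contributes at most targrows blocks to the sample.
 */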
static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
					double *totalrows, double *totaldeadrows)
{
	int			numrows = 0;	/* # rows now in reservoir */
	double		samplerows = 0; /* total # rows collected */
	double		liverows = 0;	/* # live rows seen */
	double		deadrows = 0;	/* # dead rows seen */
	double		rowstoskip = -1;	/* -1 means not set yet */
	BlockNumber totalblocks;
	TransactionId OldestXmin;
	BlockSamplerData bs;
	double		rstate;
> 1);
907 totalblocks
= RelationGetNumberOfBlocks(onerel
);
909 /* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
910 OldestXmin
= GetOldestXmin(onerel
->rd_rel
->relisshared
, true);
912 /* Prepare for sampling block numbers */
913 BlockSampler_Init(&bs
, totalblocks
, targrows
);
914 /* Prepare for sampling rows */
915 rstate
= init_selection_state(targrows
);
917 /* Outer loop over blocks to sample */
918 while (BlockSampler_HasMore(&bs
))
920 BlockNumber targblock
= BlockSampler_Next(&bs
);
923 OffsetNumber targoffset
,
926 vacuum_delay_point();
929 * We must maintain a pin on the target page's buffer to ensure that
930 * the maxoffset value stays good (else concurrent VACUUM might delete
931 * tuples out from under us). Hence, pin the page until we are done
932 * looking at it. We also choose to hold sharelock on the buffer
933 * throughout --- we could release and re-acquire sharelock for each
934 * tuple, but since we aren't doing much work per tuple, the extra
935 * lock traffic is probably better avoided.
937 targbuffer
= ReadBufferExtended(onerel
, MAIN_FORKNUM
, targblock
,
938 RBM_NORMAL
, vac_strategy
);
939 LockBuffer(targbuffer
, BUFFER_LOCK_SHARE
);
940 targpage
= BufferGetPage(targbuffer
);
941 maxoffset
= PageGetMaxOffsetNumber(targpage
);
943 /* Inner loop over all tuples on the selected page */
944 for (targoffset
= FirstOffsetNumber
; targoffset
<= maxoffset
; targoffset
++)
947 HeapTupleData targtuple
;
948 bool sample_it
= false;
950 itemid
= PageGetItemId(targpage
, targoffset
);
953 * We ignore unused and redirect line pointers. DEAD line
954 * pointers should be counted as dead, because we need vacuum to
955 * run to get rid of them. Note that this rule agrees with the
956 * way that heap_page_prune() counts things.
958 if (!ItemIdIsNormal(itemid
))
960 if (ItemIdIsDead(itemid
))
965 ItemPointerSet(&targtuple
.t_self
, targblock
, targoffset
);
967 targtuple
.t_data
= (HeapTupleHeader
) PageGetItem(targpage
, itemid
);
968 targtuple
.t_len
= ItemIdGetLength(itemid
);
970 switch (HeapTupleSatisfiesVacuum(targtuple
.t_data
,
980 case HEAPTUPLE_RECENTLY_DEAD
:
981 /* Count dead and recently-dead rows */
985 case HEAPTUPLE_INSERT_IN_PROGRESS
:
988 * Insert-in-progress rows are not counted. We assume
989 * that when the inserting transaction commits or aborts,
990 * it will send a stats message to increment the proper
991 * count. This works right only if that transaction ends
992 * after we finish analyzing the table; if things happen
993 * in the other order, its stats update will be
994 * overwritten by ours. However, the error will be large
995 * only if the other transaction runs long enough to
996 * insert many tuples, so assuming it will finish after us
997 * is the safer option.
999 * A special case is that the inserting transaction might
1000 * be our own. In this case we should count and sample
1001 * the row, to accommodate users who load a table and
1002 * analyze it in one transaction. (pgstat_report_analyze
1003 * has to adjust the numbers we send to the stats
1004 * collector to make this come out right.)
1006 if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple
.t_data
)))
1013 case HEAPTUPLE_DELETE_IN_PROGRESS
:
1016 * We count delete-in-progress rows as still live, using
1017 * the same reasoning given above; but we don't bother to
1018 * include them in the sample.
1020 * If the delete was done by our own transaction, however,
1021 * we must count the row as dead to make
1022 * pgstat_report_analyze's stats adjustments come out
1023 * right. (Note: this works out properly when the row was
1024 * both inserted and deleted in our xact.)
1026 if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(targtuple
.t_data
)))
1033 elog(ERROR
, "unexpected HeapTupleSatisfiesVacuum result");
1040 * The first targrows sample rows are simply copied into the
1041 * reservoir. Then we start replacing tuples in the sample
1042 * until we reach the end of the relation. This algorithm is
1043 * from Jeff Vitter's paper (see full citation below). It
1044 * works by repeatedly computing the number of tuples to skip
1045 * before selecting a tuple, which replaces a randomly chosen
1046 * element of the reservoir (current set of tuples). At all
1047 * times the reservoir is a true random sample of the tuples
1048 * we've passed over so far, so when we fall off the end of
1049 * the relation we're done.
1051 if (numrows
< targrows
)
1052 rows
[numrows
++] = heap_copytuple(&targtuple
);
1056 * t in Vitter's paper is the number of records already
1057 * processed. If we need to compute a new S value, we
1058 * must use the not-yet-incremented value of samplerows as
1062 rowstoskip
= get_next_S(samplerows
, targrows
, &rstate
);
1064 if (rowstoskip
<= 0)
1067 * Found a suitable tuple, so save it, replacing one
1068 * old tuple at random
1070 int k
= (int) (targrows
* random_fract());
1072 Assert(k
>= 0 && k
< targrows
);
1073 heap_freetuple(rows
[k
]);
1074 rows
[k
] = heap_copytuple(&targtuple
);
1084 /* Now release the lock and pin on the page */
1085 UnlockReleaseBuffer(targbuffer
);
1089 * If we didn't find as many tuples as we wanted then we're done. No sort
1090 * is needed, since they're already in order.
1092 * Otherwise we need to sort the collected tuples by position
1093 * (itempointer). It's not worth worrying about corner cases where the
1094 * tuples are already sorted.
1096 if (numrows
== targrows
)
1097 qsort((void *) rows
, numrows
, sizeof(HeapTuple
), compare_rows
);
1100 * Estimate total numbers of rows in relation.
1104 *totalrows
= floor((liverows
* totalblocks
) / bs
.m
+ 0.5);
1105 *totaldeadrows
= floor((deadrows
* totalblocks
) / bs
.m
+ 0.5);
1110 *totaldeadrows
= 0.0;
1114 * Emit some interesting relation info
1117 (errmsg("\"%s\": scanned %d of %u pages, "
1118 "containing %.0f live rows and %.0f dead rows; "
1119 "%d rows in sample, %.0f estimated total rows",
1120 RelationGetRelationName(onerel
),
1123 numrows
, *totalrows
)));
/* Select a random value R uniformly distributed in (0, 1) */
static double
random_fract(void)
{
	return ((double) random() + 1) / ((double) MAX_RANDOM_VALUE + 2);
}
/*
 * These two routines embody Algorithm Z from "Random sampling with a
 * reservoir" by Jeffrey S. Vitter, in ACM Trans. Math. Softw. 11, 1
 * (Mar. 1985), Pages 37-57.  Vitter describes his algorithm in terms
 * of the count S of records to skip before processing another record.
 * It is computed primarily based on t, the number of records already read.
 * The only extra state needed between calls is W, a random state variable.
 *
 * init_selection_state computes the initial W value.
 *
 * Given that we've already read t records (t >= n), get_next_S
 * determines the number of records to skip before the next record is
 * processed.
 */
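/*
 * Calling pattern, as used by acquire_sample_rows() above (sketch only):
 *
 *		rstate = init_selection_state(targrows);
 *		...once the first targrows rows have filled the reservoir...
 *		rowstoskip = get_next_S(samplerows, targrows, &rstate);
 *		skip that many rows, then overwrite a random reservoir slot,
 *		rows[(int) (targrows * random_fract())], with the next row seen.
 */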
static double
init_selection_state(int n)
{
	/* Initial value of W (for use when Algorithm Z is first applied) */
	return exp(-log(random_fract()) / n);
}
1157 get_next_S(double t
, int n
, double *stateptr
)
1161 /* The magic constant here is T from Vitter's paper */
1162 if (t
<= (22.0 * n
))
1164 /* Process records using Algorithm X until t is large enough */
1168 V
= random_fract(); /* Generate V */
1171 /* Note: "num" in Vitter's code is always equal to t - n */
1172 quot
= (t
- (double) n
) / t
;
1173 /* Find min S satisfying (4.1) */
1178 quot
*= (t
- (double) n
) / t
;
1183 /* Now apply Algorithm Z */
1184 double W
= *stateptr
;
1185 double term
= t
- (double) n
+ 1;
1199 /* Generate U and X */
1202 S
= floor(X
); /* S is tentatively set to floor(X) */
1203 /* Test if U <= h(S)/cg(X) in the manner of (6.3) */
1204 tmp
= (t
+ 1) / term
;
1205 lhs
= exp(log(((U
* tmp
* tmp
) * (term
+ S
)) / (t
+ X
)) / n
);
1206 rhs
= (((t
+ X
) / (term
+ S
)) * term
) / t
;
1212 /* Test if U <= f(S)/cg(X) */
1213 y
= (((U
* (t
+ 1)) / term
) * (t
+ S
+ 1)) / (t
+ X
);
1217 numer_lim
= term
+ S
;
1221 denom
= t
- (double) n
+ S
;
1224 for (numer
= t
+ S
; numer
>= numer_lim
; numer
-= 1)
1229 W
= exp(-log(random_fract()) / n
); /* Generate W in advance */
1230 if (exp(log(y
) / n
) <= (t
+ X
) / t
)
1239 * qsort comparator for sorting rows[] array
1242 compare_rows(const void *a
, const void *b
)
1244 HeapTuple ha
= *(HeapTuple
*) a
;
1245 HeapTuple hb
= *(HeapTuple
*) b
;
1246 BlockNumber ba
= ItemPointerGetBlockNumber(&ha
->t_self
);
1247 OffsetNumber oa
= ItemPointerGetOffsetNumber(&ha
->t_self
);
1248 BlockNumber bb
= ItemPointerGetBlockNumber(&hb
->t_self
);
1249 OffsetNumber ob
= ItemPointerGetOffsetNumber(&hb
->t_self
);
1264 * update_attstats() -- update attribute statistics for one relation
1266 * Statistics are stored in several places: the pg_class row for the
1267 * relation has stats about the whole relation, and there is a
1268 * pg_statistic row for each (non-system) attribute that has ever
1269 * been analyzed. The pg_class values are updated by VACUUM, not here.
1271 * pg_statistic rows are just added or updated normally. This means
1272 * that pg_statistic will probably contain some deleted rows at the
1273 * completion of a vacuum cycle, unless it happens to get vacuumed last.
1275 * To keep things simple, we punt for pg_statistic, and don't try
1276 * to compute or store rows for pg_statistic itself in pg_statistic.
1277 * This could possibly be made to work, but it's not worth the trouble.
1278 * Note analyze_rel() has seen to it that we won't come here when
1279 * vacuuming pg_statistic itself.
1281 * Note: there would be a race condition here if two backends could
1282 * ANALYZE the same table concurrently. Presently, we lock that out
1283 * by taking a self-exclusive lock on the relation in analyze_rel().
1286 update_attstats(Oid relid
, int natts
, VacAttrStats
**vacattrstats
)
1292 return; /* nothing to do */
1294 sd
= heap_open(StatisticRelationId
, RowExclusiveLock
);
1296 for (attno
= 0; attno
< natts
; attno
++)
1298 VacAttrStats
*stats
= vacattrstats
[attno
];
1304 Datum values
[Natts_pg_statistic
];
1305 bool nulls
[Natts_pg_statistic
];
1306 bool replaces
[Natts_pg_statistic
];
1308 /* Ignore attr if we weren't able to collect stats */
1309 if (!stats
->stats_valid
)
1313 * Construct a new pg_statistic tuple
1315 for (i
= 0; i
< Natts_pg_statistic
; ++i
)
1322 values
[i
++] = ObjectIdGetDatum(relid
); /* starelid */
1323 values
[i
++] = Int16GetDatum(stats
->attr
->attnum
); /* staattnum */
1324 values
[i
++] = Float4GetDatum(stats
->stanullfrac
); /* stanullfrac */
1325 values
[i
++] = Int32GetDatum(stats
->stawidth
); /* stawidth */
1326 values
[i
++] = Float4GetDatum(stats
->stadistinct
); /* stadistinct */
1327 for (k
= 0; k
< STATISTIC_NUM_SLOTS
; k
++)
1329 values
[i
++] = Int16GetDatum(stats
->stakind
[k
]); /* stakindN */
1331 for (k
= 0; k
< STATISTIC_NUM_SLOTS
; k
++)
1333 values
[i
++] = ObjectIdGetDatum(stats
->staop
[k
]); /* staopN */
1335 for (k
= 0; k
< STATISTIC_NUM_SLOTS
; k
++)
1337 int nnum
= stats
->numnumbers
[k
];
1341 Datum
*numdatums
= (Datum
*) palloc(nnum
* sizeof(Datum
));
1344 for (n
= 0; n
< nnum
; n
++)
1345 numdatums
[n
] = Float4GetDatum(stats
->stanumbers
[k
][n
]);
1346 /* XXX knows more than it should about type float4: */
1347 arry
= construct_array(numdatums
, nnum
,
1349 sizeof(float4
), FLOAT4PASSBYVAL
, 'i');
1350 values
[i
++] = PointerGetDatum(arry
); /* stanumbersN */
1355 values
[i
++] = (Datum
) 0;
1358 for (k
= 0; k
< STATISTIC_NUM_SLOTS
; k
++)
1360 if (stats
->numvalues
[k
] > 0)
1364 arry
= construct_array(stats
->stavalues
[k
],
1365 stats
->numvalues
[k
],
1367 stats
->statyplen
[k
],
1368 stats
->statypbyval
[k
],
1369 stats
->statypalign
[k
]);
1370 values
[i
++] = PointerGetDatum(arry
); /* stavaluesN */
1375 values
[i
++] = (Datum
) 0;
1379 /* Is there already a pg_statistic tuple for this attribute? */
1380 oldtup
= SearchSysCache(STATRELATT
,
1381 ObjectIdGetDatum(relid
),
1382 Int16GetDatum(stats
->attr
->attnum
),
1385 if (HeapTupleIsValid(oldtup
))
1387 /* Yes, replace it */
1388 stup
= heap_modify_tuple(oldtup
,
1389 RelationGetDescr(sd
),
1393 ReleaseSysCache(oldtup
);
1394 simple_heap_update(sd
, &stup
->t_self
, stup
);
1398 /* No, insert new tuple */
1399 stup
= heap_form_tuple(RelationGetDescr(sd
), values
, nulls
);
1400 simple_heap_insert(sd
, stup
);
1403 /* update indexes too */
1404 CatalogUpdateIndexes(sd
, stup
);
1406 heap_freetuple(stup
);
1409 heap_close(sd
, RowExclusiveLock
);
/*
 * Standard fetch function for use by compute_stats subroutines.
 *
 * This exists to provide some insulation between compute_stats routines
 * and the actual storage of the sample data.
 */
static Datum
std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
	int			attnum = stats->tupattnum;
	HeapTuple	tuple = stats->rows[rownum];
	TupleDesc	tupDesc = stats->tupDesc;

	return heap_getattr(tuple, attnum, tupDesc, isNull);
}
/*
 * Fetch function for analyzing index expressions.
 *
 * We have not bothered to construct index tuples, instead the data is
 * just in Datum arrays.
 */
static Datum
ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
	int			i;

	/* exprvals and exprnulls are already offset for proper column */
	i = rownum * stats->rowstride;
	*isNull = stats->exprnulls[i];
	return stats->exprvals[i];
}
/*==========================================================================
 *
 * Code below this point represents the "standard" type-specific statistics
 * analysis algorithms.  This code can be replaced on a per-data-type basis
 * by setting a nonzero value in pg_type.typanalyze.
 *
 *==========================================================================
 */
/*
 * To avoid consuming too much memory during analysis and/or too much space
 * in the resulting pg_statistic rows, we ignore varlena datums that are wider
 * than WIDTH_THRESHOLD (after detoasting!).  This is legitimate for MCV
 * and distinct-value calculations since a wide value is unlikely to be
 * duplicated at all, much less be a most-common value.  For the same reason,
 * ignoring wide values will not affect our estimates of histogram bin
 * boundaries very much.
 */
#define WIDTH_THRESHOLD  1024

#define swapInt(a,b)	do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
#define swapDatum(a,b)	do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
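/*
 * Effect of WIDTH_THRESHOLD in the routines below: a varlena value whose
 * detoasted size exceeds 1024 bytes still contributes to the null-fraction
 * and average-width figures, but it is skipped (and tallied in toowide_cnt)
 * when collecting most-common-value and histogram candidates.
 */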
/*
 * Extra information used by the default analysis routines
 */
typedef struct
{
	Oid			eqopr;			/* '=' operator for datatype, if any */
	Oid			eqfunc;			/* and associated function */
	Oid			ltopr;			/* '<' operator for datatype, if any */
} StdAnalyzeData;

typedef struct
{
	Datum		value;			/* a data value */
	int			tupno;			/* position index for tuple it came from */
} ScalarItem;

typedef struct
{
	int			count;			/* # of duplicates */
	int			first;			/* values[] index of first occurrence */
} ScalarMCVItem;

typedef struct
{
	FmgrInfo   *cmpFn;
	int			cmpFlags;
	int		   *tupnoLink;
} CompareScalarsContext;
static void compute_minimal_stats(VacAttrStatsP stats,
					  AnalyzeAttrFetchFunc fetchfunc,
					  int samplerows,
					  double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
					 AnalyzeAttrFetchFunc fetchfunc,
					 int samplerows,
					 double totalrows);
static int	compare_scalars(const void *a, const void *b, void *arg);
static int	compare_mcvs(const void *a, const void *b);
/*
 * std_typanalyze -- the default type-specific typanalyze function
 */
static bool
std_typanalyze(VacAttrStats *stats)
{
	Form_pg_attribute attr = stats->attr;
	Oid			ltopr;
	Oid			eqopr;
	StdAnalyzeData *mystats;

	/* If the attstattarget column is negative, use the default value */
	/* NB: it is okay to scribble on stats->attr since it's a copy */
	if (attr->attstattarget < 0)
		attr->attstattarget = default_statistics_target;

	/* Look for default "<" and "=" operators for column's type */
	get_sort_group_operators(attr->atttypid,
							 false, false, false,
							 &ltopr, &eqopr, NULL);

	/* If column has no "=" operator, we can't do much of anything */
	if (!OidIsValid(eqopr))
		return false;

	/* Save the operator info for compute_stats routines */
	mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
	mystats->eqopr = eqopr;
	mystats->eqfunc = get_opcode(eqopr);
	mystats->ltopr = ltopr;
	stats->extra_data = mystats;

	/*
	 * Determine which standard statistics algorithm to use
	 */
	if (OidIsValid(ltopr))
	{
		/* Seems to be a scalar datatype */
		stats->compute_stats = compute_scalar_stats;

		/*--------------------
		 * The following choice of minrows is based on the paper
		 * "Random sampling for histogram construction: how much is enough?"
		 * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
		 * Proceedings of ACM SIGMOD International Conference on Management
		 * of Data, 1998, Pages 436-447.  Their Corollary 1 to Theorem 5
		 * says that for table size n, histogram size k, maximum relative
		 * error in bin size f, and error probability gamma, the minimum
		 * random sample size is
		 *		r = 4 * k * ln(2*n/gamma) / f^2
		 * Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain
		 *		r = 305.82 * k
		 * Note that because of the log function, the dependence on n is
		 * quite weak; even at n = 10^12, a 300*k sample gives <= 0.66
		 * bin size error with probability 0.99.  So there's no real need to
		 * scale for n, which is a good thing because we don't necessarily
		 * know it at this point.
		 *--------------------
		 */
		stats->minrows = 300 * attr->attstattarget;
	}
	else
	{
		/* Can't do much but the minimal stuff */
		stats->compute_stats = compute_minimal_stats;
		/* Might as well use the same minrows as above */
		stats->minrows = 300 * attr->attstattarget;
	}

	return true;
}
1584 * compute_minimal_stats() -- compute minimal column statistics
1586 * We use this when we can find only an "=" operator for the datatype.
1588 * We determine the fraction of non-null rows, the average width, the
1589 * most common values, and the (estimated) number of distinct values.
1591 * The most common values are determined by brute force: we keep a list
1592 * of previously seen values, ordered by number of times seen, as we scan
1593 * the samples. A newly seen value is inserted just after the last
1594 * multiply-seen value, causing the bottommost (oldest) singly-seen value
1595 * to drop off the list. The accuracy of this method, and also its cost,
1596 * depend mainly on the length of the list we are willing to keep.
1599 compute_minimal_stats(VacAttrStatsP stats
,
1600 AnalyzeAttrFetchFunc fetchfunc
,
1606 int nonnull_cnt
= 0;
1607 int toowide_cnt
= 0;
1608 double total_width
= 0;
1609 bool is_varlena
= (!stats
->attr
->attbyval
&&
1610 stats
->attr
->attlen
== -1);
1611 bool is_varwidth
= (!stats
->attr
->attbyval
&&
1612 stats
->attr
->attlen
< 0);
1622 int num_mcv
= stats
->attr
->attstattarget
;
1623 StdAnalyzeData
*mystats
= (StdAnalyzeData
*) stats
->extra_data
;
1626 * We track up to 2*n values for an n-element MCV list; but at least 10
1628 track_max
= 2 * num_mcv
;
1631 track
= (TrackItem
*) palloc(track_max
* sizeof(TrackItem
));
1634 fmgr_info(mystats
->eqfunc
, &f_cmpeq
);
1636 for (i
= 0; i
< samplerows
; i
++)
1644 vacuum_delay_point();
1646 value
= fetchfunc(stats
, i
, &isnull
);
1648 /* Check for null/nonnull */
1657 * If it's a variable-width field, add up widths for average width
1658 * calculation. Note that if the value is toasted, we use the toasted
1659 * width. We don't bother with this calculation if it's a fixed-width
1664 total_width
+= VARSIZE_ANY(DatumGetPointer(value
));
1667 * If the value is toasted, we want to detoast it just once to
1668 * avoid repeated detoastings and resultant excess memory usage
1669 * during the comparisons. Also, check to see if the value is
1670 * excessively wide, and if so don't detoast at all --- just
1673 if (toast_raw_datum_size(value
) > WIDTH_THRESHOLD
)
1678 value
= PointerGetDatum(PG_DETOAST_DATUM(value
));
1680 else if (is_varwidth
)
1682 /* must be cstring */
1683 total_width
+= strlen(DatumGetCString(value
)) + 1;
1687 * See if the value matches anything we're already tracking.
1690 firstcount1
= track_cnt
;
1691 for (j
= 0; j
< track_cnt
; j
++)
1693 if (DatumGetBool(FunctionCall2(&f_cmpeq
, value
, track
[j
].value
)))
1698 if (j
< firstcount1
&& track
[j
].count
== 1)
1706 /* This value may now need to "bubble up" in the track list */
1707 while (j
> 0 && track
[j
].count
> track
[j
- 1].count
)
1709 swapDatum(track
[j
].value
, track
[j
- 1].value
);
1710 swapInt(track
[j
].count
, track
[j
- 1].count
);
1716 /* No match. Insert at head of count-1 list */
1717 if (track_cnt
< track_max
)
1719 for (j
= track_cnt
- 1; j
> firstcount1
; j
--)
1721 track
[j
].value
= track
[j
- 1].value
;
1722 track
[j
].count
= track
[j
- 1].count
;
1724 if (firstcount1
< track_cnt
)
1726 track
[firstcount1
].value
= value
;
1727 track
[firstcount1
].count
= 1;
1732 /* We can only compute real stats if we found some non-null values. */
1733 if (nonnull_cnt
> 0)
1738 stats
->stats_valid
= true;
1739 /* Do the simple null-frac and width stats */
1740 stats
->stanullfrac
= (double) null_cnt
/ (double) samplerows
;
1742 stats
->stawidth
= total_width
/ (double) nonnull_cnt
;
1744 stats
->stawidth
= stats
->attrtype
->typlen
;
1746 /* Count the number of values we found multiple times */
1748 for (nmultiple
= 0; nmultiple
< track_cnt
; nmultiple
++)
1750 if (track
[nmultiple
].count
== 1)
1752 summultiple
+= track
[nmultiple
].count
;
1757 /* If we found no repeated values, assume it's a unique column */
1758 stats
->stadistinct
= -1.0;
1760 else if (track_cnt
< track_max
&& toowide_cnt
== 0 &&
1761 nmultiple
== track_cnt
)
1764 * Our track list includes every value in the sample, and every
1765 * value appeared more than once. Assume the column has just
1768 stats
->stadistinct
= track_cnt
;
			/*----------
			 * Estimate the number of distinct values using the estimator
			 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
			 *		n*d / (n - f1 + f1*n/N)
			 * where f1 is the number of distinct values that occurred
			 * exactly once in our sample of n rows (from a total of N),
			 * and d is the total number of distinct values in the sample.
			 * This is their Duj1 estimator; the other estimators they
			 * recommend are considerably more complex, and are numerically
			 * very unstable when n is much smaller than N.
			 *
			 * We assume (not very reliably!) that all the multiply-occurring
			 * values are reflected in the final track[] list, and the other
			 * nonnull values all appeared but once.  (XXX this usually
			 * results in a drastic overestimate of ndistinct.  Can we do
			 * any better?)
			 *----------
			 */
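			/*
			 * Worked example (illustrative numbers): for a sample of
			 * n = 30000 rows out of N = 1,000,000, with d = 1000 distinct
			 * values of which f1 = 900 were seen exactly once, the estimate
			 * is 30000*1000 / (30000 - 900 + 900*30000/1000000)
			 *    = 30000000 / 29127, or about 1030 distinct values.
			 */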
			int			f1 = nonnull_cnt - summultiple;
			int			d = f1 + nmultiple;
			double		numer,
						denom,
						stadistinct;

			numer = (double) samplerows * (double) d;

			denom = (double) (samplerows - f1) +
				(double) f1 * (double) samplerows / totalrows;

			stadistinct = numer / denom;
			/* Clamp to sane range in case of roundoff error */
			if (stadistinct < (double) d)
				stadistinct = (double) d;
			if (stadistinct > totalrows)
				stadistinct = totalrows;
			stats->stadistinct = floor(stadistinct + 0.5);
		/*
		 * If we estimated the number of distinct values at more than 10% of
		 * the total row count (a very arbitrary limit), then assume that
		 * stadistinct should scale with the row count rather than be a fixed
		 * value.
		 */
		if (stats->stadistinct > 0.1 * totalrows)
			stats->stadistinct = -(stats->stadistinct / totalrows);
		/*
		 * Decide how many values are worth storing as most-common values. If
		 * we are able to generate a complete MCV list (all the values in the
		 * sample will fit, and we think these are all the ones in the table),
		 * then do so.  Otherwise, store only those values that are
		 * significantly more common than the (estimated) average. We set the
		 * threshold rather arbitrarily at 25% more than average, with at
		 * least 2 instances in the sample.
		 */
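		/*
		 * Example (illustrative numbers): with 30000 sample rows and an
		 * estimated 1000 distinct values, a typical value appears about 30
		 * times in the sample, so the cutoff is 30 * 1.25 = 37.5 and only
		 * values seen at least 38 times qualify for the MCV list.
		 */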
1828 if (track_cnt
< track_max
&& toowide_cnt
== 0 &&
1829 stats
->stadistinct
> 0 &&
1830 track_cnt
<= num_mcv
)
1832 /* Track list includes all values seen, and all will fit */
1833 num_mcv
= track_cnt
;
1837 double ndistinct
= stats
->stadistinct
;
1842 ndistinct
= -ndistinct
* totalrows
;
1843 /* estimate # of occurrences in sample of a typical value */
1844 avgcount
= (double) samplerows
/ ndistinct
;
1845 /* set minimum threshold count to store a value */
1846 mincount
= avgcount
* 1.25;
1849 if (num_mcv
> track_cnt
)
1850 num_mcv
= track_cnt
;
1851 for (i
= 0; i
< num_mcv
; i
++)
1853 if (track
[i
].count
< mincount
)
1861 /* Generate MCV slot entry */
1864 MemoryContext old_context
;
1868 /* Must copy the target values into anl_context */
1869 old_context
= MemoryContextSwitchTo(stats
->anl_context
);
1870 mcv_values
= (Datum
*) palloc(num_mcv
* sizeof(Datum
));
1871 mcv_freqs
= (float4
*) palloc(num_mcv
* sizeof(float4
));
1872 for (i
= 0; i
< num_mcv
; i
++)
1874 mcv_values
[i
] = datumCopy(track
[i
].value
,
1875 stats
->attr
->attbyval
,
1876 stats
->attr
->attlen
);
1877 mcv_freqs
[i
] = (double) track
[i
].count
/ (double) samplerows
;
1879 MemoryContextSwitchTo(old_context
);
1881 stats
->stakind
[0] = STATISTIC_KIND_MCV
;
1882 stats
->staop
[0] = mystats
->eqopr
;
1883 stats
->stanumbers
[0] = mcv_freqs
;
1884 stats
->numnumbers
[0] = num_mcv
;
1885 stats
->stavalues
[0] = mcv_values
;
1886 stats
->numvalues
[0] = num_mcv
;
1889 * Accept the defaults for stats->statypid and others. They have
1890 * been set before we were called (see vacuum.h)
1894 else if (null_cnt
> 0)
1896 /* We found only nulls; assume the column is entirely null */
1897 stats
->stats_valid
= true;
1898 stats
->stanullfrac
= 1.0;
1900 stats
->stawidth
= 0; /* "unknown" */
1902 stats
->stawidth
= stats
->attrtype
->typlen
;
1903 stats
->stadistinct
= 0.0; /* "unknown" */
1906 /* We don't need to bother cleaning up any of our temporary palloc's */
1911 * compute_scalar_stats() -- compute column statistics
1913 * We use this when we can find "=" and "<" operators for the datatype.
1915 * We determine the fraction of non-null rows, the average width, the
1916 * most common values, the (estimated) number of distinct values, the
1917 * distribution histogram, and the correlation of physical to logical order.
1919 * The desired stats can be determined fairly easily after sorting the
1920 * data values into order.
1923 compute_scalar_stats(VacAttrStatsP stats
,
1924 AnalyzeAttrFetchFunc fetchfunc
,
1930 int nonnull_cnt
= 0;
1931 int toowide_cnt
= 0;
1932 double total_width
= 0;
1933 bool is_varlena
= (!stats
->attr
->attbyval
&&
1934 stats
->attr
->attlen
== -1);
1935 bool is_varwidth
= (!stats
->attr
->attbyval
&&
1936 stats
->attr
->attlen
< 0);
1944 ScalarMCVItem
*track
;
1946 int num_mcv
= stats
->attr
->attstattarget
;
1947 int num_bins
= stats
->attr
->attstattarget
;
1948 StdAnalyzeData
*mystats
= (StdAnalyzeData
*) stats
->extra_data
;
1950 values
= (ScalarItem
*) palloc(samplerows
* sizeof(ScalarItem
));
1951 tupnoLink
= (int *) palloc(samplerows
* sizeof(int));
1952 track
= (ScalarMCVItem
*) palloc(num_mcv
* sizeof(ScalarMCVItem
));
1954 SelectSortFunction(mystats
->ltopr
, false, &cmpFn
, &cmpFlags
);
1955 fmgr_info(cmpFn
, &f_cmpfn
);
1957 /* Initial scan to find sortable values */
1958 for (i
= 0; i
< samplerows
; i
++)
1963 vacuum_delay_point();
1965 value
= fetchfunc(stats
, i
, &isnull
);
1967 /* Check for null/nonnull */
1976 * If it's a variable-width field, add up widths for average width
1977 * calculation. Note that if the value is toasted, we use the toasted
1978 * width. We don't bother with this calculation if it's a fixed-width
1983 total_width
+= VARSIZE_ANY(DatumGetPointer(value
));
1986 * If the value is toasted, we want to detoast it just once to
1987 * avoid repeated detoastings and resultant excess memory usage
1988 * during the comparisons. Also, check to see if the value is
1989 * excessively wide, and if so don't detoast at all --- just
1992 if (toast_raw_datum_size(value
) > WIDTH_THRESHOLD
)
1997 value
= PointerGetDatum(PG_DETOAST_DATUM(value
));
1999 else if (is_varwidth
)
2001 /* must be cstring */
2002 total_width
+= strlen(DatumGetCString(value
)) + 1;
2005 /* Add it to the list to be sorted */
2006 values
[values_cnt
].value
= value
;
2007 values
[values_cnt
].tupno
= values_cnt
;
2008 tupnoLink
[values_cnt
] = values_cnt
;
2012 /* We can only compute real stats if we found some sortable values. */
2015 int ndistinct
, /* # distinct values in sample */
2016 nmultiple
, /* # that appear multiple times */
2020 CompareScalarsContext cxt
;
2022 /* Sort the collected values */
2023 cxt
.cmpFn
= &f_cmpfn
;
2024 cxt
.cmpFlags
= cmpFlags
;
2025 cxt
.tupnoLink
= tupnoLink
;
2026 qsort_arg((void *) values
, values_cnt
, sizeof(ScalarItem
),
2027 compare_scalars
, (void *) &cxt
);
		/*
		 * Now scan the values in order, find the most common ones, and also
		 * accumulate ordering-correlation statistics.
		 *
		 * To determine which are most common, we first have to count the
		 * number of duplicates of each value.  The duplicates are adjacent in
		 * the sorted list, so a brute-force approach is to compare successive
		 * datum values until we find two that are not equal. However, that
		 * requires N-1 invocations of the datum comparison routine, which are
		 * completely redundant with work that was done during the sort.  (The
		 * sort algorithm must at some point have compared each pair of items
		 * that are adjacent in the sorted order; otherwise it could not know
		 * that it's ordered the pair correctly.) We exploit this by having
		 * compare_scalars remember the highest tupno index that each
		 * ScalarItem has been found equal to.  At the end of the sort, a
		 * ScalarItem's tupnoLink will still point to itself if and only if it
		 * is the last item of its group of duplicates (since the group will
		 * be ordered by tupno).
		 */
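		/*
		 * Illustration (made-up tupnos): if the values at tupnos 3, 7, and 9
		 * are all equal, then after the sort tupnoLink[3] and tupnoLink[7]
		 * each point at a later member of the group (7 or 9), while
		 * tupnoLink[9] still points at itself.  The scan below uses that
		 * "points at itself" test to find the end of each duplicate group
		 * without calling the comparison function again.
		 */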
2052 for (i
= 0; i
< values_cnt
; i
++)
2054 int tupno
= values
[i
].tupno
;
2056 corr_xysum
+= ((double) i
) * ((double) tupno
);
2058 if (tupnoLink
[tupno
] == tupno
)
2060 /* Reached end of duplicates of this value */
2065 if (track_cnt
< num_mcv
||
2066 dups_cnt
> track
[track_cnt
- 1].count
)
2069 * Found a new item for the mcv list; find its
2070 * position, bubbling down old items if needed. Loop
2071 * invariant is that j points at an empty/ replaceable
2076 if (track_cnt
< num_mcv
)
2078 for (j
= track_cnt
- 1; j
> 0; j
--)
2080 if (dups_cnt
<= track
[j
- 1].count
)
2082 track
[j
].count
= track
[j
- 1].count
;
2083 track
[j
].first
= track
[j
- 1].first
;
2085 track
[j
].count
= dups_cnt
;
2086 track
[j
].first
= i
+ 1 - dups_cnt
;
2093 stats
->stats_valid
= true;
2094 /* Do the simple null-frac and width stats */
2095 stats
->stanullfrac
= (double) null_cnt
/ (double) samplerows
;
2097 stats
->stawidth
= total_width
/ (double) nonnull_cnt
;
2099 stats
->stawidth
= stats
->attrtype
->typlen
;
2103 /* If we found no repeated values, assume it's a unique column */
2104 stats
->stadistinct
= -1.0;
2106 else if (toowide_cnt
== 0 && nmultiple
== ndistinct
)
2109 * Every value in the sample appeared more than once. Assume the
2110 * column has just these values.
2112 stats
->stadistinct
= ndistinct
;
		else
		{
			/*----------
			 * Estimate the number of distinct values using the estimator
			 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
			 *		n*d / (n - f1 + f1*n/N)
			 * where f1 is the number of distinct values that occurred
			 * exactly once in our sample of n rows (from a total of N),
			 * and d is the total number of distinct values in the sample.
			 * This is their Duj1 estimator; the other estimators they
			 * recommend are considerably more complex, and are numerically
			 * very unstable when n is much smaller than N.
			 *
			 * Overwidth values are assumed to have been distinct.
			 *----------
			 */
			int			f1 = ndistinct - nmultiple + toowide_cnt;
			int			d = f1 + nmultiple;
			double		numer,
						denom,
						stadistinct;

			numer = (double) samplerows * (double) d;

			denom = (double) (samplerows - f1) +
				(double) f1 * (double) samplerows / totalrows;

			stadistinct = numer / denom;
			/* Clamp to sane range in case of roundoff error */
			if (stadistinct < (double) d)
				stadistinct = (double) d;
			if (stadistinct > totalrows)
				stadistinct = totalrows;
			stats->stadistinct = floor(stadistinct + 0.5);
		}
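		/*
		 * Worked example of the Duj1 estimator (illustrative numbers, not
		 * from the original source): with samplerows = 30000 drawn from
		 * totalrows = 1000000, d = 1000 distinct sample values of which
		 * f1 = 200 were seen exactly once,
		 *		numer = 30000 * 1000 = 3.0e7
		 *		denom = (30000 - 200) + 200 * 30000 / 1000000 = 29806
		 * giving stadistinct ~= 1006.5, which rounds to 1007.
		 */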
		/*
		 * If we estimated the number of distinct values at more than 10% of
		 * the total row count (a very arbitrary limit), then assume that
		 * stadistinct should scale with the row count rather than be a fixed
		 * value.
		 */
		if (stats->stadistinct > 0.1 * totalrows)
			stats->stadistinct = -(stats->stadistinct / totalrows);
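		/*
		 * Illustrative note (not part of the original source): an estimate of
		 * 200000 distinct values in a 1000000-row table is stored as -0.2,
		 * i.e. "about 20% of the rows are distinct", so the planner's figure
		 * keeps scaling as the table grows instead of staying pinned at
		 * 200000.
		 */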
		/*
		 * Decide how many values are worth storing as most-common values. If
		 * we are able to generate a complete MCV list (all the values in the
		 * sample will fit, and we think these are all the ones in the table),
		 * then do so.  Otherwise, store only those values that are
		 * significantly more common than the (estimated) average. We set the
		 * threshold rather arbitrarily at 25% more than average, with at
		 * least 2 instances in the sample.  Also, we won't suppress values
		 * that have a frequency of at least 1/K where K is the intended
		 * number of histogram bins; such values might otherwise cause us to
		 * emit duplicate histogram bin boundaries.  (We might end up with
		 * duplicate histogram entries anyway, if the distribution is skewed;
		 * but we prefer to treat such values as MCVs if at all possible.)
		 */
		if (track_cnt == ndistinct && toowide_cnt == 0 &&
			stats->stadistinct > 0 &&
			track_cnt <= num_mcv)
		{
			/* Track list includes all values seen, and all will fit */
			num_mcv = track_cnt;
		}
		else
		{
			double		ndistinct = stats->stadistinct;
			double		avgcount,
						mincount,
						maxmincount;

			if (ndistinct < 0)
				ndistinct = -ndistinct * totalrows;
			/* estimate # of occurrences in sample of a typical value */
			avgcount = (double) samplerows / ndistinct;
			/* set minimum threshold count to store a value */
			mincount = avgcount * 1.25;
			if (mincount < 2)
				mincount = 2;
			/* don't let threshold exceed 1/K, however */
			maxmincount = (double) samplerows / (double) num_bins;
			if (mincount > maxmincount)
				mincount = maxmincount;
			if (num_mcv > track_cnt)
				num_mcv = track_cnt;
			for (i = 0; i < num_mcv; i++)
			{
				if (track[i].count < mincount)
				{
					num_mcv = i;
					break;
				}
			}
		}
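		/*
		 * Worked example of the MCV cutoff (illustrative numbers, not from
		 * the original source): with samplerows = 30000 and an estimated
		 * 1000 distinct values, avgcount = 30 and mincount = 37.5; with
		 * num_bins = 100, maxmincount = 300, so the 37.5 threshold stands
		 * and only values seen at least 38 times in the sample survive into
		 * the MCV list.
		 */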
		/* Generate MCV slot entry */
		if (num_mcv > 0)
		{
			MemoryContext old_context;
			Datum	   *mcv_values;
			float4	   *mcv_freqs;

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
			mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
			for (i = 0; i < num_mcv; i++)
			{
				mcv_values[i] = datumCopy(values[track[i].first].value,
										  stats->attr->attbyval,
										  stats->attr->attlen);
				mcv_freqs[i] = (double) track[i].count / (double) samplerows;
			}
			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
			stats->staop[slot_idx] = mystats->eqopr;
			stats->stanumbers[slot_idx] = mcv_freqs;
			stats->numnumbers[slot_idx] = num_mcv;
			stats->stavalues[slot_idx] = mcv_values;
			stats->numvalues[slot_idx] = num_mcv;
			/*
			 * Accept the defaults for stats->statypid and others. They have
			 * been set before we were called (see vacuum.h)
			 */
			slot_idx++;
		}

		/*
		 * Generate a histogram slot entry if there are at least two distinct
		 * values not accounted for in the MCV list.  (This ensures the
		 * histogram won't collapse to empty or a singleton.)
		 */
		num_hist = ndistinct - num_mcv;
		if (num_hist > num_bins)
			num_hist = num_bins + 1;
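		/*
		 * Illustrative example (not part of the original source): with
		 * ndistinct = 150, num_mcv = 10 and num_bins = 100, num_hist is
		 * first 140 and then capped at 101, i.e. 101 boundary values
		 * describing 100 roughly equal-population bins.
		 */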
		if (num_hist >= 2)
		{
			MemoryContext old_context;
			Datum	   *hist_values;
			int			nvals;
			int			pos,
						posfrac,
						delta,
						deltafrac;

			/* Sort the MCV items into position order to speed next loop */
			qsort((void *) track, num_mcv,
				  sizeof(ScalarMCVItem), compare_mcvs);
			/*
			 * Collapse out the MCV items from the values[] array.
			 *
			 * Note we destroy the values[] array here... but we don't need it
			 * for anything more.  We do, however, still need values_cnt.
			 * nvals will be the number of remaining entries in values[].
			 */
			if (num_mcv > 0)
			{
				int			src,
							dest;
				int			j;

				src = dest = 0;
				j = 0;			/* index of next interesting MCV item */
				while (src < values_cnt)
				{
					int			ncopy;

					if (j < num_mcv)
					{
						int			first = track[j].first;

						if (src >= first)
						{
							/* advance past this MCV item */
							src = first + track[j].count;
							j++;
							continue;
						}
						ncopy = first - src;
					}
					else
						ncopy = values_cnt - src;
					memmove(&values[dest], &values[src],
							ncopy * sizeof(ScalarItem));
					src += ncopy;
					dest += ncopy;
				}
				nvals = dest;
			}
			else
				nvals = values_cnt;
			Assert(nvals >= num_hist);
			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			hist_values = (Datum *) palloc(num_hist * sizeof(Datum));
			/*
			 * The object of this loop is to copy the first and last values[]
			 * entries along with evenly-spaced values in between.  So the
			 * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)].  But
			 * computing that subscript directly risks integer overflow when
			 * the stats target is more than a couple thousand.  Instead we
			 * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
			 * the integral and fractional parts of the sum separately.
			 */
			delta = (nvals - 1) / (num_hist - 1);
			deltafrac = (nvals - 1) % (num_hist - 1);
			pos = posfrac = 0;
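			/*
			 * Worked example (illustrative numbers, not from the original
			 * source): with nvals = 240 and num_hist = 101, delta = 2 and
			 * deltafrac = 39; pos advances by 2 each step and gains an extra
			 * 1 whenever the accumulated fraction reaches 100, reproducing
			 * floor(i * 239 / 100) without overflow, so the final boundary
			 * (i = 100) lands exactly on values[239].
			 */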
			for (i = 0; i < num_hist; i++)
			{
				hist_values[i] = datumCopy(values[pos].value,
										   stats->attr->attbyval,
										   stats->attr->attlen);
				pos += delta;
				posfrac += deltafrac;
				if (posfrac >= (num_hist - 1))
				{
					/* fractional part exceeds 1, carry to integer part */
					pos++;
					posfrac -= (num_hist - 1);
				}
			}

			MemoryContextSwitchTo(old_context);
			stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
			stats->staop[slot_idx] = mystats->ltopr;
			stats->stavalues[slot_idx] = hist_values;
			stats->numvalues[slot_idx] = num_hist;
			/*
			 * Accept the defaults for stats->statypid and others. They have
			 * been set before we were called (see vacuum.h)
			 */
			slot_idx++;
		}

		/* Generate a correlation entry if there are multiple values */
		if (values_cnt > 1)
		{
			MemoryContext old_context;
			float4	   *corrs;
			double		corr_xsum,
						corr_x2sum;
			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			corrs = (float4 *) palloc(sizeof(float4));
			MemoryContextSwitchTo(old_context);
			/*----------
			 * Since we know the x and y value sets are both
			 *		0, 1, ..., values_cnt-1
			 * we have sum(x) = sum(y) =
			 *		(values_cnt-1)*values_cnt / 2
			 * and sum(x^2) = sum(y^2) =
			 *		(values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
			 *----------
			 */
			corr_xsum = ((double) (values_cnt - 1)) *
				((double) values_cnt) / 2.0;
			corr_x2sum = ((double) (values_cnt - 1)) *
				((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;

			/* And the correlation coefficient reduces to */
			corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
				(values_cnt * corr_x2sum - corr_xsum * corr_xsum);
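			/*
			 * Illustrative note (not part of the original source): because x
			 * (sorted position) and y (physical position, tupno) are both
			 * permutations of 0..values_cnt-1, Pearson's
			 *		r = (n*sum(xy) - sum(x)*sum(y)) /
			 *			sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))
			 * collapses to the expression above.  A column whose values were
			 * inserted in sorted order has tupno == i everywhere, so
			 * corr_xysum equals corr_x2sum and corrs[0] comes out as 1.0.
			 */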
			stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
			stats->staop[slot_idx] = mystats->ltopr;
			stats->stanumbers[slot_idx] = corrs;
			stats->numnumbers[slot_idx] = 1;
			slot_idx++;
		}
	}
	else if (nonnull_cnt == 0 && null_cnt > 0)
	{
		/* We found only nulls; assume the column is entirely null */
		stats->stats_valid = true;
		stats->stanullfrac = 1.0;
		if (is_varwidth)
			stats->stawidth = 0;	/* "unknown" */
		else
			stats->stawidth = stats->attrtype->typlen;
		stats->stadistinct = 0.0;	/* "unknown" */
	}
	/* We don't need to bother cleaning up any of our temporary palloc's */
}
/*
 * qsort_arg comparator for sorting ScalarItems
 *
 * Aside from sorting the items, we update the tupnoLink[] array
 * whenever two ScalarItems are found to contain equal datums.  The array
 * is indexed by tupno; for each ScalarItem, it contains the highest
 * tupno that that item's datum has been found to be equal to.  This allows
 * us to avoid additional comparisons in compute_scalar_stats().
 */
static int
compare_scalars(const void *a, const void *b, void *arg)
{
	Datum		da = ((ScalarItem *) a)->value;
	int			ta = ((ScalarItem *) a)->tupno;
	Datum		db = ((ScalarItem *) b)->value;
	int			tb = ((ScalarItem *) b)->tupno;
	CompareScalarsContext *cxt = (CompareScalarsContext *) arg;
	int32		compare;

	compare = ApplySortFunction(cxt->cmpFn, cxt->cmpFlags,
								da, false, db, false);
	if (compare != 0)
		return compare;
	/*
	 * The two datums are equal, so update cxt->tupnoLink[].
	 */
	if (cxt->tupnoLink[ta] < tb)
		cxt->tupnoLink[ta] = tb;
	if (cxt->tupnoLink[tb] < ta)
		cxt->tupnoLink[tb] = ta;
	/*
	 * For equal datums, sort by tupno
	 */
	return ta - tb;
}
/*
 * qsort comparator for sorting ScalarMCVItems by position
 */
static int
compare_mcvs(const void *a, const void *b)
{
	int			da = ((ScalarMCVItem *) a)->first;
	int			db = ((ScalarMCVItem *) b)->first;

	return da - db;
}