/*-------------------------------------------------------------------------
 *	  the Postgres statistics generator
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#include "access/heapam.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
#include "catalog/pg_namespace.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
#include "utils/tqual.h"

/* Data structure for Algorithm S from Knuth 3.4.2 */
typedef struct
{
    BlockNumber N;              /* number of blocks, known in advance */
    int         n;              /* desired sample size */
    BlockNumber t;              /* current block number */
    int         m;              /* blocks selected so far */
} BlockSamplerData;

typedef BlockSamplerData *BlockSampler;
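
/*
 * Illustrative usage sketch (mirrors what acquire_sample_rows() does below;
 * the block-processing step here is a hypothetical placeholder):
 *
 *		BlockSamplerData bs;
 *
 *		BlockSampler_Init(&bs, nblocks, samplesize);
 *		while (BlockSampler_HasMore(&bs))
 *		{
 *			BlockNumber targblock = BlockSampler_Next(&bs);
 *
 *			... read targblock and feed its tuples to stage-two sampling ...
 *		}
 */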

/* Per-index data for ANALYZE */
typedef struct AnlIndexData
{
    IndexInfo  *indexInfo;      /* BuildIndexInfo result */
    double      tupleFract;     /* fraction of rows for partial index */
    VacAttrStats **vacattrstats;    /* index attrs to analyze */
    int         attr_cnt;
} AnlIndexData;

/* Default statistics target (GUC parameter) */
int         default_statistics_target = 10;

/* A few variables that don't seem worth passing around as parameters */
static int  elevel = -1;

static MemoryContext anl_context = NULL;

static BufferAccessStrategy vac_strategy;

static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
                  int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
                    AnlIndexData *indexdata, int nindexes,
                    HeapTuple *rows, int numrows,
                    MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int  acquire_sample_rows(Relation onerel, HeapTuple *rows,
                    int targrows, double *totalrows, double *totaldeadrows);
static double random_fract(void);
static double init_selection_state(int n);
static double get_next_S(double t, int n, double *stateptr);
static int  compare_rows(const void *a, const void *b);
static void update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static bool std_typanalyze(VacAttrStats *stats);

/*
 *	analyze_rel() -- analyze one relation
 */
void
analyze_rel(Oid relid, VacuumStmt *vacstmt,
            BufferAccessStrategy bstrategy)
{
    bool        analyzableindex;
    VacAttrStats **vacattrstats;
    AnlIndexData *indexdata;
    TimestampTz starttime = 0;

    if (vacstmt->verbose)
        elevel = INFO;
    else
        elevel = DEBUG2;

    vac_strategy = bstrategy;

    /*
     * Use the current context for storing analysis info.  vacuum.c ensures
     * that this context will be cleared when I return, thus releasing the
     * memory allocated here.
     */
    anl_context = CurrentMemoryContext;

    /*
     * Check for user-requested abort.  Note we want this to be inside a
     * transaction, so xact.c doesn't issue useless WARNING.
     */
    CHECK_FOR_INTERRUPTS();

    /*
     * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
     * ANALYZEs don't run on it concurrently.  (This also locks out a
     * concurrent VACUUM, which doesn't matter much at the moment but might
     * matter if we ever try to accumulate stats on dead tuples.)  If the rel
     * has been dropped since we last saw it, we don't need to process it.
     */
    onerel = try_relation_open(relid, ShareUpdateExclusiveLock);

    /*
     * Check permissions --- this should match vacuum's check!
     */
    if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
          (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
    {
        /* No need for a WARNING if we already complained during VACUUM */
        if (!vacstmt->vacuum)
        {
            if (onerel->rd_rel->relisshared)
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only superuser can analyze it",
                                RelationGetRelationName(onerel))));
            else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
                                RelationGetRelationName(onerel))));
            else
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only table or database owner can analyze it",
                                RelationGetRelationName(onerel))));
        }
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }

    /*
     * Check that it's a plain table; we used to do this in get_rel_oids() but
     * seems safer to check after we've locked the relation.
     */
    if (onerel->rd_rel->relkind != RELKIND_RELATION)
    {
        /* No need for a WARNING if we already complained during VACUUM */
        if (!vacstmt->vacuum)
            ereport(WARNING,
                    (errmsg("skipping \"%s\" --- cannot analyze indexes, views, or special system tables",
                            RelationGetRelationName(onerel))));
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }

    /*
     * Silently ignore tables that are temp tables of other backends ---
     * trying to analyze these is rather pointless, since their contents are
     * probably not up-to-date on disk.  (We don't throw a warning here; it
     * would just lead to chatter during a database-wide ANALYZE.)
     */
    if (isOtherTempNamespace(RelationGetNamespace(onerel)))
    {
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }

    /*
     * We can ANALYZE any table except pg_statistic. See update_attstats
     */
    if (RelationGetRelid(onerel) == StatisticRelationId)
    {
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }

    ereport(elevel,
            (errmsg("analyzing \"%s.%s\"",
                    get_namespace_name(RelationGetNamespace(onerel)),
                    RelationGetRelationName(onerel))));

    /*
     * Switch to the table owner's userid, so that any index functions are
     * run as that user.
     */
    GetUserIdAndContext(&save_userid, &save_secdefcxt);
    SetUserIdAndContext(onerel->rd_rel->relowner, true);

    /* let others know what I'm doing */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    MyProc->vacuumFlags |= PROC_IN_ANALYZE;
    LWLockRelease(ProcArrayLock);

    /* measure elapsed time iff autovacuum logging requires it */
    if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
    {
        pg_rusage_init(&ru0);
        if (Log_autovacuum_min_duration > 0)
            starttime = GetCurrentTimestamp();
    }

    /*
     * Determine which columns to analyze
     *
     * Note that system attributes are never analyzed.
     */
    if (vacstmt->va_cols != NIL)
    {
        vacattrstats = (VacAttrStats **) palloc(list_length(vacstmt->va_cols) *
                                                sizeof(VacAttrStats *));
        foreach(le, vacstmt->va_cols)
        {
            char       *col = strVal(lfirst(le));

            i = attnameAttNum(onerel, col, false);
            if (i == InvalidAttrNumber)
                ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_COLUMN),
                         errmsg("column \"%s\" of relation \"%s\" does not exist",
                                col, RelationGetRelationName(onerel))));
            vacattrstats[tcnt] = examine_attribute(onerel, i);
            if (vacattrstats[tcnt] != NULL)
                tcnt++;
        }
    }
    else
    {
        attr_cnt = onerel->rd_att->natts;
        vacattrstats = (VacAttrStats **)
            palloc(attr_cnt * sizeof(VacAttrStats *));
        for (i = 1; i <= attr_cnt; i++)
        {
            vacattrstats[tcnt] = examine_attribute(onerel, i);
            if (vacattrstats[tcnt] != NULL)
                tcnt++;
        }
    }

    /*
     * Open all indexes of the relation, and see if there are any analyzable
     * columns in the indexes.  We do not analyze index columns if there was
     * an explicit column list in the ANALYZE command, however.
     */
    vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
    hasindex = (nindexes > 0);
    analyzableindex = false;
    if (hasindex)
    {
        indexdata = (AnlIndexData *) palloc0(nindexes * sizeof(AnlIndexData));
        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];
            IndexInfo  *indexInfo;

            thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
            thisdata->tupleFract = 1.0; /* fix later if partial */
            if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL)
            {
                ListCell   *indexpr_item = list_head(indexInfo->ii_Expressions);

                thisdata->vacattrstats = (VacAttrStats **)
                    palloc(indexInfo->ii_NumIndexAttrs * sizeof(VacAttrStats *));
                for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
                {
                    int         keycol = indexInfo->ii_KeyAttrNumbers[i];

                    if (keycol == 0)
                    {
                        /* Found an index expression */
                        if (indexpr_item == NULL)   /* shouldn't happen */
                            elog(ERROR, "too few entries in indexprs list");
                        indexkey = (Node *) lfirst(indexpr_item);
                        indexpr_item = lnext(indexpr_item);

                        /*
                         * Can't analyze if the opclass uses a storage type
                         * different from the expression result type. We'd get
                         * confused because the type shown in pg_attribute for
                         * the index column doesn't match what we are getting
                         * from the expression. Perhaps this can be fixed
                         * someday, but for now, punt.
                         */
                        if (exprType(indexkey) !=
                            Irel[ind]->rd_att->attrs[i]->atttypid)
                            continue;

                        thisdata->vacattrstats[tcnt] =
                            examine_attribute(Irel[ind], i + 1);
                        if (thisdata->vacattrstats[tcnt] != NULL)
                        {
                            tcnt++;
                            analyzableindex = true;
                        }
                    }
                }
                thisdata->attr_cnt = tcnt;
            }
        }
    }

    /*
     * Quit if no analyzable columns
     */
    if (attr_cnt <= 0 && !analyzableindex)
    {
        /*
         * We report that the table is empty; this is just so that the
         * autovacuum code doesn't go nuts trying to get stats about a
         * zero-column table.
         */
        if (!vacstmt->vacuum)
            pgstat_report_analyze(onerel, 0, 0);
        goto cleanup;
    }

    /*
     * Determine how many rows we need to sample, using the worst case from
     * all analyzable columns.  We use a lower bound of 100 rows to avoid
     * possible overflow in Vitter's algorithm.
     */
    targrows = 100;
    for (i = 0; i < attr_cnt; i++)
    {
        if (targrows < vacattrstats[i]->minrows)
            targrows = vacattrstats[i]->minrows;
    }
    for (ind = 0; ind < nindexes; ind++)
    {
        AnlIndexData *thisdata = &indexdata[ind];

        for (i = 0; i < thisdata->attr_cnt; i++)
        {
            if (targrows < thisdata->vacattrstats[i]->minrows)
                targrows = thisdata->vacattrstats[i]->minrows;
        }
    }

    /*
     * Acquire the sample rows
     */
    rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
    numrows = acquire_sample_rows(onerel, rows, targrows,
                                  &totalrows, &totaldeadrows);

    /*
     * Compute the statistics.  Temporary results during the calculations for
     * each column are stored in a child context.  The calc routines are
     * responsible to make sure that whatever they store into the VacAttrStats
     * structure is allocated in anl_context.
     */
    if (numrows > 0)
    {
        MemoryContext col_context,
                    old_context;

        col_context = AllocSetContextCreate(anl_context,
                                            "Analyze Column",
                                            ALLOCSET_DEFAULT_MINSIZE,
                                            ALLOCSET_DEFAULT_INITSIZE,
                                            ALLOCSET_DEFAULT_MAXSIZE);
        old_context = MemoryContextSwitchTo(col_context);

        for (i = 0; i < attr_cnt; i++)
        {
            VacAttrStats *stats = vacattrstats[i];

            stats->rows = rows;
            stats->tupDesc = onerel->rd_att;
            (*stats->compute_stats) (stats,
                                     std_fetch_func,
                                     numrows,
                                     totalrows);
            MemoryContextResetAndDeleteChildren(col_context);
        }

        if (hasindex)
            compute_index_stats(onerel, totalrows,
                                indexdata, nindexes,
                                rows, numrows,
                                col_context);

        MemoryContextSwitchTo(old_context);
        MemoryContextDelete(col_context);

        /*
         * Emit the completed stats rows into pg_statistic, replacing any
         * previous statistics for the target columns.  (If there are stats in
         * pg_statistic for columns we didn't process, we leave them alone.)
         */
        update_attstats(relid, attr_cnt, vacattrstats);

        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];

            update_attstats(RelationGetRelid(Irel[ind]),
                            thisdata->attr_cnt, thisdata->vacattrstats);
        }
    }

    /*
     * If we are running a standalone ANALYZE, update pages/tuples stats in
     * pg_class.  We know the accurate page count from the smgr, but only an
     * approximate number of tuples; therefore, if we are part of VACUUM
     * ANALYZE do *not* overwrite the accurate count already inserted by
     * VACUUM.  The same consideration applies to indexes.
     */
    if (!vacstmt->vacuum)
    {
        vac_update_relstats(RelationGetRelid(onerel),
                            RelationGetNumberOfBlocks(onerel),
                            totalrows, hasindex,
                            InvalidTransactionId);

        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];
            double      totalindexrows;

            totalindexrows = ceil(thisdata->tupleFract * totalrows);
            vac_update_relstats(RelationGetRelid(Irel[ind]),
                                RelationGetNumberOfBlocks(Irel[ind]),
                                totalindexrows, false,
                                InvalidTransactionId);
        }

        /* report results to the stats collector, too */
        pgstat_report_analyze(onerel, totalrows, totaldeadrows);
    }

    /* We skip to here if there were no analyzable columns */
cleanup:

    /* Done with indexes */
    vac_close_indexes(nindexes, Irel, NoLock);

    /*
     * Close source relation now, but keep lock so that no one deletes it
     * before we commit.  (If someone did, they'd fail to clean up the entries
     * we made in pg_statistic.  Also, releasing the lock before commit would
     * expose us to concurrent-update failures in update_attstats.)
     */
    relation_close(onerel, NoLock);

    /* Log the action if appropriate */
    if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
    {
        if (Log_autovacuum_min_duration == 0 ||
            TimestampDifferenceExceeds(starttime, GetCurrentTimestamp(),
                                       Log_autovacuum_min_duration))
            ereport(LOG,
                    (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s",
                            get_database_name(MyDatabaseId),
                            get_namespace_name(RelationGetNamespace(onerel)),
                            RelationGetRelationName(onerel),
                            pg_rusage_show(&ru0))));
    }

    /*
     * Reset my PGPROC flag.  Note: we need this here, and not in vacuum_rel,
     * because the vacuum flag is cleared by the end-of-xact code.
     */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    MyProc->vacuumFlags &= ~PROC_IN_ANALYZE;
    LWLockRelease(ProcArrayLock);

    SetUserIdAndContext(save_userid, save_secdefcxt);
}

/*
 * Compute statistics about indexes of a relation
 */
static void
compute_index_stats(Relation onerel, double totalrows,
                    AnlIndexData *indexdata, int nindexes,
                    HeapTuple *rows, int numrows,
                    MemoryContext col_context)
{
    MemoryContext ind_context,
                old_context;
    Datum       values[INDEX_MAX_KEYS];
    bool        isnull[INDEX_MAX_KEYS];

    ind_context = AllocSetContextCreate(anl_context,
                                        "Analyze Index",
                                        ALLOCSET_DEFAULT_MINSIZE,
                                        ALLOCSET_DEFAULT_INITSIZE,
                                        ALLOCSET_DEFAULT_MAXSIZE);
    old_context = MemoryContextSwitchTo(ind_context);

    for (ind = 0; ind < nindexes; ind++)
    {
        AnlIndexData *thisdata = &indexdata[ind];
        IndexInfo  *indexInfo = thisdata->indexInfo;
        int         attr_cnt = thisdata->attr_cnt;
        TupleTableSlot *slot;
        ExprContext *econtext;
        double      totalindexrows;

        /* Ignore index if no columns to analyze and not partial */
        if (attr_cnt == 0 && indexInfo->ii_Predicate == NIL)
            continue;

        /*
         * Need an EState for evaluation of index expressions and
         * partial-index predicates.  Create it in the per-index context to be
         * sure it gets cleaned up at the bottom of the loop.
         */
        estate = CreateExecutorState();
        econtext = GetPerTupleExprContext(estate);
        /* Need a slot to hold the current heap tuple, too */
        slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel));

        /* Arrange for econtext's scan tuple to be the tuple under test */
        econtext->ecxt_scantuple = slot;

        /* Set up execution state for predicate. */
        predicate = (List *)
            ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
                            estate);

        /* Compute and save index expression values */
        exprvals = (Datum *) palloc(numrows * attr_cnt * sizeof(Datum));
        exprnulls = (bool *) palloc(numrows * attr_cnt * sizeof(bool));
        for (rowno = 0; rowno < numrows; rowno++)
        {
            HeapTuple   heapTuple = rows[rowno];

            /* Set up for predicate or expression evaluation */
            ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);

            /* If index is partial, check predicate */
            if (predicate != NIL)
            {
                if (!ExecQual(predicate, econtext, false))
                    continue;
            }
            numindexrows++;

            if (attr_cnt > 0)
            {
                /*
                 * Evaluate the index row to compute expression values. We
                 * could do this by hand, but FormIndexDatum is convenient.
                 */
                FormIndexDatum(indexInfo,
                               slot,
                               estate,
                               values,
                               isnull);

                /*
                 * Save just the columns we care about.
                 */
                for (i = 0; i < attr_cnt; i++)
                {
                    VacAttrStats *stats = thisdata->vacattrstats[i];
                    int         attnum = stats->attr->attnum;

                    exprvals[tcnt] = values[attnum - 1];
                    exprnulls[tcnt] = isnull[attnum - 1];
                    tcnt++;
                }
            }
        }

        /*
         * Having counted the number of rows that pass the predicate in the
         * sample, we can estimate the total number of rows in the index.
         */
        thisdata->tupleFract = (double) numindexrows / (double) numrows;
        totalindexrows = ceil(thisdata->tupleFract * totalrows);

        /*
         * Now we can compute the statistics for the expression columns.
         */
        if (numindexrows > 0)
        {
            MemoryContextSwitchTo(col_context);
            for (i = 0; i < attr_cnt; i++)
            {
                VacAttrStats *stats = thisdata->vacattrstats[i];

                stats->exprvals = exprvals + i;
                stats->exprnulls = exprnulls + i;
                stats->rowstride = attr_cnt;
                (*stats->compute_stats) (stats,
                                         ind_fetch_func,
                                         numindexrows,
                                         totalindexrows);
                MemoryContextResetAndDeleteChildren(col_context);
            }
        }

        MemoryContextSwitchTo(ind_context);

        ExecDropSingleTupleTableSlot(slot);
        FreeExecutorState(estate);
        MemoryContextResetAndDeleteChildren(ind_context);
    }

    MemoryContextSwitchTo(old_context);
    MemoryContextDelete(ind_context);
}
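
/*
 * Worked example for compute_index_stats()'s tupleFract (hypothetical
 * numbers, for illustration only): if a partial index's predicate passes
 * numindexrows = 150 of numrows = 30000 sampled rows, tupleFract is
 * 150/30000 = 0.005, and with totalrows = 1,000,000 the index is estimated
 * to hold ceil(0.005 * 1000000) = 5000 rows.
 */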

/*
 * examine_attribute -- pre-analysis of a single column
 *
 * Determine whether the column is analyzable; if so, create and initialize
 * a VacAttrStats struct for it.  If not, return NULL.
 */
static VacAttrStats *
examine_attribute(Relation onerel, int attnum)
{
    Form_pg_attribute attr = onerel->rd_att->attrs[attnum - 1];

    /* Never analyze dropped columns */
    if (attr->attisdropped)
        return NULL;

    /* Don't analyze column if user has specified not to */
    if (attr->attstattarget == 0)
        return NULL;

    /*
     * Create the VacAttrStats struct.
     */
    stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
    stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
    memcpy(stats->attr, attr, ATTRIBUTE_TUPLE_SIZE);
    typtuple = SearchSysCache(TYPEOID,
                              ObjectIdGetDatum(attr->atttypid),
                              0, 0, 0);
    if (!HeapTupleIsValid(typtuple))
        elog(ERROR, "cache lookup failed for type %u", attr->atttypid);
    stats->attrtype = (Form_pg_type) palloc(sizeof(FormData_pg_type));
    memcpy(stats->attrtype, GETSTRUCT(typtuple), sizeof(FormData_pg_type));
    ReleaseSysCache(typtuple);
    stats->anl_context = anl_context;
    stats->tupattnum = attnum;

    /*
     * The fields describing the stats->stavalues[n] element types default
     * to the type of the field being analyzed, but the type-specific
     * typanalyze function can change them if it wants to store something
     * different.
     */
    for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
    {
        stats->statypid[i] = stats->attr->atttypid;
        stats->statyplen[i] = stats->attrtype->typlen;
        stats->statypbyval[i] = stats->attrtype->typbyval;
        stats->statypalign[i] = stats->attrtype->typalign;
    }

    /*
     * Call the type-specific typanalyze function.  If none is specified, use
     * the standard typanalyze function.
     */
    if (OidIsValid(stats->attrtype->typanalyze))
        ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
                                           PointerGetDatum(stats)));
    else
        ok = std_typanalyze(stats);

    if (!ok || stats->compute_stats == NULL || stats->minrows <= 0)
    {
        pfree(stats->attrtype);
        pfree(stats->attr);
        pfree(stats);
        return NULL;
    }

    return stats;
}

/*
 * BlockSampler_Init -- prepare for random sampling of blocknumbers
 *
 * BlockSampler is used for stage one of our new two-stage tuple
 * sampling mechanism as discussed on pgsql-hackers 2004-04-02 (subject
 * "Large DB").  It selects a random sample of samplesize blocks out of
 * the nblocks blocks in the table.  If the table has less than
 * samplesize blocks, all blocks are selected.
 *
 * Since we know the total number of blocks in advance, we can use the
 * straightforward Algorithm S from Knuth 3.4.2, rather than Vitter's
 * algorithm.
 */
static void
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
    bs->N = nblocks;            /* measured table size */

    /*
     * If we decide to reduce samplesize for tables that have less or not much
     * more than samplesize blocks, here is the place to do it.
     */
    bs->n = samplesize;
    bs->t = 0;                  /* blocks scanned so far */
    bs->m = 0;                  /* blocks selected so far */
}

static bool
BlockSampler_HasMore(BlockSampler bs)
{
    return (bs->t < bs->N) && (bs->m < bs->n);
}

static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
    BlockNumber K = bs->N - bs->t;      /* remaining blocks */
    int         k = bs->n - bs->m;      /* blocks still to sample */
    double      p;              /* probability to skip block */
    double      V;              /* random */

    Assert(BlockSampler_HasMore(bs));   /* hence K > 0 and k > 0 */

    if ((BlockNumber) k >= K)
    {
        /* need all the rest */
        bs->m++;
        return bs->t++;
    }

    /*
     * It is not obvious that this code matches Knuth's Algorithm S.
     * Knuth says to skip the current block with probability 1 - k/K.
     * If we are to skip, we should advance t (hence decrease K), and
     * repeat the same probabilistic test for the next block.  The naive
     * implementation thus requires a random_fract() call for each block
     * number.  But we can reduce this to one random_fract() call per
     * selected block, by noting that each time the while-test succeeds,
     * we can reinterpret V as a uniform random number in the range 0 to p.
     * Therefore, instead of choosing a new V, we just adjust p to be
     * the appropriate fraction of its former value, and our next loop
     * makes the appropriate probabilistic test.
     *
     * We have initially K > k > 0.  If the loop reduces K to equal k,
     * the next while-test must fail since p will become exactly zero
     * (we assume there will not be roundoff error in the division).
     * (Note: Knuth suggests a "<=" loop condition, but we use "<" just
     * to be doubly sure about roundoff error.)  Therefore K cannot become
     * less than k, which means that we cannot fail to select enough blocks.
     */
    V = random_fract();
    p = 1.0 - (double) k / (double) K;
    while (V < p)
    {
        /* skip */
        bs->t++;
        K--;                    /* keep K == N - t */

        /* adjust p to be new cutoff point in reduced range */
        p *= 1.0 - (double) k / (double) K;
    }

    /* select */
    bs->m++;
    return bs->t++;
}

/*
 * acquire_sample_rows -- acquire a random sample of rows from the table
 *
 * As of May 2004 we use a new two-stage method:  Stage one selects up
 * to targrows random blocks (or all blocks, if there aren't so many).
 * Stage two scans these blocks and uses the Vitter algorithm to create
 * a random sample of targrows rows (or less, if there are less in the
 * sample of blocks).  The two stages are executed simultaneously: each
 * block is processed as soon as stage one returns its number and while
 * the rows are read stage two controls which ones are to be inserted
 * into the sample.
 *
 * Although every row has an equal chance of ending up in the final
 * sample, this sampling method is not perfect: not every possible
 * sample has an equal chance of being selected.  For large relations
 * the number of different blocks represented by the sample tends to be
 * too small.  We can live with that for now.  Improvements are welcome.
 *
 * We also estimate the total numbers of live and dead rows in the table,
 * and return them into *totalrows and *totaldeadrows, respectively.
 *
 * An important property of this sampling method is that because we do
 * look at a statistically unbiased set of blocks, we should get
 * unbiased estimates of the average numbers of live and dead rows per
 * block.  The previous sampling method put too much credence in the row
 * density near the start of the table.
 *
 * The returned list of tuples is in order by physical position in the table.
 * (We will rely on this later to derive correlation estimates.)
 */
static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
                    double *totalrows, double *totaldeadrows)
{
    int         numrows = 0;    /* # rows now in reservoir */
    double      samplerows = 0; /* total # rows collected */
    double      liverows = 0;   /* # live rows seen */
    double      deadrows = 0;   /* # dead rows seen */
    double      rowstoskip = -1;    /* -1 means not set yet */
    BlockNumber totalblocks;
    TransactionId OldestXmin;

    Assert(targrows > 1);

    totalblocks = RelationGetNumberOfBlocks(onerel);

    /* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
    OldestXmin = GetOldestXmin(onerel->rd_rel->relisshared, true);

    /* Prepare for sampling block numbers */
    BlockSampler_Init(&bs, totalblocks, targrows);
    /* Prepare for sampling rows */
    rstate = init_selection_state(targrows);

    /* Outer loop over blocks to sample */
    while (BlockSampler_HasMore(&bs))
    {
        BlockNumber targblock = BlockSampler_Next(&bs);
        OffsetNumber targoffset,
                    maxoffset;

        vacuum_delay_point();

        /*
         * We must maintain a pin on the target page's buffer to ensure that
         * the maxoffset value stays good (else concurrent VACUUM might delete
         * tuples out from under us).  Hence, pin the page until we are done
         * looking at it.  We also choose to hold sharelock on the buffer
         * throughout --- we could release and re-acquire sharelock for
         * each tuple, but since we aren't doing much work per tuple, the
         * extra lock traffic is probably better avoided.
         */
        targbuffer = ReadBufferWithStrategy(onerel, targblock, vac_strategy);
        LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
        targpage = BufferGetPage(targbuffer);
        maxoffset = PageGetMaxOffsetNumber(targpage);

        /* Inner loop over all tuples on the selected page */
        for (targoffset = FirstOffsetNumber; targoffset <= maxoffset; targoffset++)
        {
            ItemId      itemid;
            HeapTupleData targtuple;
            bool        sample_it = false;

            itemid = PageGetItemId(targpage, targoffset);

            /*
             * We ignore unused and redirect line pointers.  DEAD line
             * pointers should be counted as dead, because we need vacuum
             * to run to get rid of them.  Note that this rule agrees with
             * the way that heap_page_prune() counts things.
             */
            if (!ItemIdIsNormal(itemid))
            {
                if (ItemIdIsDead(itemid))
                    deadrows += 1;
                continue;
            }

            ItemPointerSet(&targtuple.t_self, targblock, targoffset);

            targtuple.t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
            targtuple.t_len = ItemIdGetLength(itemid);

            switch (HeapTupleSatisfiesVacuum(targtuple.t_data,
                                             OldestXmin, targbuffer))
            {
                case HEAPTUPLE_RECENTLY_DEAD:
                    /* Count dead and recently-dead rows */
                    deadrows += 1;
                    break;

                case HEAPTUPLE_INSERT_IN_PROGRESS:

                    /*
                     * Insert-in-progress rows are not counted.  We assume
                     * that when the inserting transaction commits or aborts,
                     * it will send a stats message to increment the proper
                     * count.  This works right only if that transaction ends
                     * after we finish analyzing the table; if things happen
                     * in the other order, its stats update will be
                     * overwritten by ours.  However, the error will be
                     * large only if the other transaction runs long enough
                     * to insert many tuples, so assuming it will finish
                     * after us is the safer option.
                     *
                     * A special case is that the inserting transaction might
                     * be our own.  In this case we should count and sample
                     * the row, to accommodate users who load a table and
                     * analyze it in one transaction.  (pgstat_report_analyze
                     * has to adjust the numbers we send to the stats collector
                     * to make this come out right.)
                     */
                    if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple.t_data)))
                    {
                        sample_it = true;
                        liverows += 1;
                    }
                    break;

                case HEAPTUPLE_DELETE_IN_PROGRESS:

                    /*
                     * We count delete-in-progress rows as still live, using
                     * the same reasoning given above; but we don't bother to
                     * include them in the sample.
                     *
                     * If the delete was done by our own transaction, however,
                     * we must count the row as dead to make
                     * pgstat_report_analyze's stats adjustments come out
                     * right.  (Note: this works out properly when the row
                     * was both inserted and deleted in our xact.)
                     */
                    if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(targtuple.t_data)))
                        deadrows += 1;
                    else
                        liverows += 1;
                    break;

                default:
                    elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                    break;
            }

            if (sample_it)
            {
                /*
                 * The first targrows sample rows are simply copied into the
                 * reservoir.  Then we start replacing tuples in the sample
                 * until we reach the end of the relation.  This algorithm is
                 * from Jeff Vitter's paper (see full citation below).  It
                 * works by repeatedly computing the number of tuples to skip
                 * before selecting a tuple, which replaces a randomly chosen
                 * element of the reservoir (current set of tuples).  At all
                 * times the reservoir is a true random sample of the tuples
                 * we've passed over so far, so when we fall off the end of
                 * the relation we're done.
                 */
                if (numrows < targrows)
                    rows[numrows++] = heap_copytuple(&targtuple);
                else
                {
                    /*
                     * t in Vitter's paper is the number of records already
                     * processed.  If we need to compute a new S value, we
                     * must use the not-yet-incremented value of samplerows
                     * as t.
                     */
                    if (rowstoskip < 0)
                        rowstoskip = get_next_S(samplerows, targrows, &rstate);

                    if (rowstoskip <= 0)
                    {
                        /*
                         * Found a suitable tuple, so save it, replacing one
                         * old tuple at random
                         */
                        int         k = (int) (targrows * random_fract());

                        Assert(k >= 0 && k < targrows);
                        heap_freetuple(rows[k]);
                        rows[k] = heap_copytuple(&targtuple);
                    }

                    rowstoskip -= 1;
                }

                samplerows += 1;
            }
        }

        /* Now release the lock and pin on the page */
        UnlockReleaseBuffer(targbuffer);
    }

    /*
     * If we didn't find as many tuples as we wanted then we're done. No sort
     * is needed, since they're already in order.
     *
     * Otherwise we need to sort the collected tuples by position
     * (itempointer).  It's not worth worrying about corner cases where the
     * tuples are already sorted.
     */
    if (numrows == targrows)
        qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);

    /*
     * Estimate total numbers of rows in relation.
     */
    if (bs.m > 0)
    {
        *totalrows = floor((liverows * totalblocks) / bs.m + 0.5);
        *totaldeadrows = floor((deadrows * totalblocks) / bs.m + 0.5);
    }
    else
    {
        *totalrows = 0.0;
        *totaldeadrows = 0.0;
    }
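
    /*
     * Illustration of the extrapolation above (hypothetical numbers): if the
     * sampler visited bs.m = 300 of totalblocks = 3000 pages and saw
     * liverows = 33000 live rows, the estimate is
     * floor(33000 * 3000 / 300 + 0.5) = 330000 total live rows.
     */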

    /*
     * Emit some interesting relation info
     */
    ereport(elevel,
            (errmsg("\"%s\": scanned %d of %u pages, "
                    "containing %.0f live rows and %.0f dead rows; "
                    "%d rows in sample, %.0f estimated total rows",
                    RelationGetRelationName(onerel),
                    bs.m, totalblocks,
                    liverows, deadrows,
                    numrows, *totalrows)));

    return numrows;
}

/* Select a random value R uniformly distributed in (0 - 1) */
static double
random_fract(void)
{
    return ((double) random() + 1) / ((double) MAX_RANDOM_VALUE + 2);
}
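
/*
 * Sanity note on the expression above: with random() in
 * [0, MAX_RANDOM_VALUE], the result lies between 1/(MAX_RANDOM_VALUE + 2)
 * and (MAX_RANDOM_VALUE + 1)/(MAX_RANDOM_VALUE + 2), i.e. strictly inside
 * (0, 1).  That matters because callers such as init_selection_state()
 * take log() of the value.
 */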

/*
 * These two routines embody Algorithm Z from "Random sampling with a
 * reservoir" by Jeffrey S. Vitter, in ACM Trans. Math. Softw. 11, 1
 * (Mar. 1985), Pages 37-57.  Vitter describes his algorithm in terms
 * of the count S of records to skip before processing another record.
 * It is computed primarily based on t, the number of records already read.
 * The only extra state needed between calls is W, a random state variable.
 *
 * init_selection_state computes the initial W value.
 *
 * Given that we've already read t records (t >= n), get_next_S
 * determines the number of records to skip before the next record is
 * processed.
 */
static double
init_selection_state(int n)
{
    /* Initial value of W (for use when Algorithm Z is first applied) */
    return exp(-log(random_fract()) / n);
}

static double
get_next_S(double t, int n, double *stateptr)
{
    double      S;

    /* The magic constant here is T from Vitter's paper */
    if (t <= (22.0 * n))
    {
        /* Process records using Algorithm X until t is large enough */
        double      V,
                    quot;

        V = random_fract();     /* Generate V */
        S = 0;
        t += 1;
        /* Note: "num" in Vitter's code is always equal to t - n */
        quot = (t - (double) n) / t;
        /* Find min S satisfying (4.1) */
        while (quot > V)
        {
            S += 1;
            t += 1;
            quot *= (t - (double) n) / t;
        }
    }
    else
    {
        /* Now apply Algorithm Z */
        double      W = *stateptr;
        double      term = t - (double) n + 1;

        for (;;)
        {
            /* Generate U and X */
            S = floor(X);       /* S is tentatively set to floor(X) */
            /* Test if U <= h(S)/cg(X) in the manner of (6.3) */
            tmp = (t + 1) / term;
            lhs = exp(log(((U * tmp * tmp) * (term + S)) / (t + X)) / n);
            rhs = (((t + X) / (term + S)) * term) / t;

            /* Test if U <= f(S)/cg(X) */
            y = (((U * (t + 1)) / term) * (t + S + 1)) / (t + X);
            if ((double) n < S)
            {
                denom = t;
                numer_lim = term + S;
            }
            else
            {
                denom = t - (double) n + S;
                numer_lim = t + 1;
            }
            for (numer = t + S; numer >= numer_lim; numer -= 1)
            {
                y *= numer / denom;
                denom -= 1;
            }
            W = exp(-log(random_fract()) / n);  /* Generate W in advance */
            if (exp(log(y) / n) <= (t + X) / t)
                break;
        }
        *stateptr = W;
    }
    return S;
}

/*
 * qsort comparator for sorting rows[] array
 */
static int
compare_rows(const void *a, const void *b)
{
    HeapTuple   ha = *(HeapTuple *) a;
    HeapTuple   hb = *(HeapTuple *) b;
    BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
    OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
    BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
    OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);

    if (ba < bb)
        return -1;
    if (ba > bb)
        return 1;
    if (oa < ob)
        return -1;
    if (oa > ob)
        return 1;
    return 0;
}

/*
 *	update_attstats() -- update attribute statistics for one relation
 *
 *	Statistics are stored in several places: the pg_class row for the
 *	relation has stats about the whole relation, and there is a
 *	pg_statistic row for each (non-system) attribute that has ever
 *	been analyzed.  The pg_class values are updated by VACUUM, not here.
 *
 *	pg_statistic rows are just added or updated normally.  This means
 *	that pg_statistic will probably contain some deleted rows at the
 *	completion of a vacuum cycle, unless it happens to get vacuumed last.
 *
 *	To keep things simple, we punt for pg_statistic, and don't try
 *	to compute or store rows for pg_statistic itself in pg_statistic.
 *	This could possibly be made to work, but it's not worth the trouble.
 *	Note analyze_rel() has seen to it that we won't come here when
 *	vacuuming pg_statistic itself.
 *
 *	Note: there would be a race condition here if two backends could
 *	ANALYZE the same table concurrently.  Presently, we lock that out
 *	by taking a self-exclusive lock on the relation in analyze_rel().
 */
static void
update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
{
    if (natts <= 0)
        return;                 /* nothing to do */

    sd = heap_open(StatisticRelationId, RowExclusiveLock);

    for (attno = 0; attno < natts; attno++)
    {
        VacAttrStats *stats = vacattrstats[attno];
        Datum       values[Natts_pg_statistic];
        char        nulls[Natts_pg_statistic];
        char        replaces[Natts_pg_statistic];

        /* Ignore attr if we weren't able to collect stats */
        if (!stats->stats_valid)
            continue;

        /*
         * Construct a new pg_statistic tuple
         */
        for (i = 0; i < Natts_pg_statistic; ++i)
        {
            nulls[i] = ' ';
            replaces[i] = 'r';
        }

        i = 0;
        values[i++] = ObjectIdGetDatum(relid);              /* starelid */
        values[i++] = Int16GetDatum(stats->attr->attnum);   /* staattnum */
        values[i++] = Float4GetDatum(stats->stanullfrac);   /* stanullfrac */
        values[i++] = Int32GetDatum(stats->stawidth);       /* stawidth */
        values[i++] = Float4GetDatum(stats->stadistinct);   /* stadistinct */
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            values[i++] = Int16GetDatum(stats->stakind[k]);     /* stakindN */
        }
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            values[i++] = ObjectIdGetDatum(stats->staop[k]);    /* staopN */
        }
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            int         nnum = stats->numnumbers[k];

            if (nnum > 0)
            {
                Datum      *numdatums = (Datum *) palloc(nnum * sizeof(Datum));
                ArrayType  *arry;

                for (n = 0; n < nnum; n++)
                    numdatums[n] = Float4GetDatum(stats->stanumbers[k][n]);
                /* XXX knows more than it should about type float4: */
                arry = construct_array(numdatums, nnum,
                                       FLOAT4OID,
                                       sizeof(float4), FLOAT4PASSBYVAL, 'i');
                values[i++] = PointerGetDatum(arry);    /* stanumbersN */
            }
            else
            {
                nulls[i] = 'n';
                values[i++] = (Datum) 0;
            }
        }
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            if (stats->numvalues[k] > 0)
            {
                ArrayType  *arry;

                arry = construct_array(stats->stavalues[k],
                                       stats->numvalues[k],
                                       stats->statypid[k],
                                       stats->statyplen[k],
                                       stats->statypbyval[k],
                                       stats->statypalign[k]);
                values[i++] = PointerGetDatum(arry);    /* stavaluesN */
            }
            else
            {
                nulls[i] = 'n';
                values[i++] = (Datum) 0;
            }
        }

        /* Is there already a pg_statistic tuple for this attribute? */
        oldtup = SearchSysCache(STATRELATT,
                                ObjectIdGetDatum(relid),
                                Int16GetDatum(stats->attr->attnum),
                                0, 0);

        if (HeapTupleIsValid(oldtup))
        {
            /* Yes, replace it */
            stup = heap_modifytuple(oldtup,
                                    RelationGetDescr(sd),
                                    values,
                                    nulls,
                                    replaces);
            ReleaseSysCache(oldtup);
            simple_heap_update(sd, &stup->t_self, stup);
        }
        else
        {
            /* No, insert new tuple */
            stup = heap_formtuple(RelationGetDescr(sd), values, nulls);
            simple_heap_insert(sd, stup);
        }

        /* update indexes too */
        CatalogUpdateIndexes(sd, stup);

        heap_freetuple(stup);
    }

    heap_close(sd, RowExclusiveLock);
}

/*
 * Standard fetch function for use by compute_stats subroutines.
 *
 * This exists to provide some insulation between compute_stats routines
 * and the actual storage of the sample data.
 */
static Datum
std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
    int         attnum = stats->tupattnum;
    HeapTuple   tuple = stats->rows[rownum];
    TupleDesc   tupDesc = stats->tupDesc;

    return heap_getattr(tuple, attnum, tupDesc, isNull);
}

/*
 * Fetch function for analyzing index expressions.
 *
 * We have not bothered to construct index tuples, instead the data is
 * just in Datum arrays.
 */
static Datum
ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
    int         i;

    /* exprvals and exprnulls are already offset for proper column */
    i = rownum * stats->rowstride;
    *isNull = stats->exprnulls[i];
    return stats->exprvals[i];
}
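
/*
 * Layout illustration for ind_fetch_func() (hypothetical sizes):
 * compute_index_stats() stores the expression values row-major, attr_cnt
 * Datums per sampled row, and points stats->exprvals/exprnulls at column i's
 * first entry.  With attr_cnt = 3 (so rowstride = 3), column 1's values sit
 * at offsets 1, 4, 7, ... of the underlying array, which is exactly what
 * rownum * rowstride indexes once the per-column offset has been applied.
 */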

/*==========================================================================
 *
 * Code below this point represents the "standard" type-specific statistics
 * analysis algorithms.  This code can be replaced on a per-data-type basis
 * by setting a nonzero value in pg_type.typanalyze.
 *
 *==========================================================================
 */

/*
 * To avoid consuming too much memory during analysis and/or too much space
 * in the resulting pg_statistic rows, we ignore varlena datums that are wider
 * than WIDTH_THRESHOLD (after detoasting!).  This is legitimate for MCV
 * and distinct-value calculations since a wide value is unlikely to be
 * duplicated at all, much less be a most-common value.  For the same reason,
 * ignoring wide values will not affect our estimates of histogram bin
 * boundaries very much.
 */
#define WIDTH_THRESHOLD  1024

#define swapInt(a,b)	do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
#define swapDatum(a,b)	do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)

/*
 * Extra information used by the default analysis routines
 */
typedef struct
{
    Oid         eqopr;          /* '=' operator for datatype, if any */
    Oid         eqfunc;         /* and associated function */
    Oid         ltopr;          /* '<' operator for datatype, if any */
} StdAnalyzeData;

typedef struct
{
    Datum       value;          /* a data value */
    int         tupno;          /* position index for tuple it came from */
} ScalarItem;

typedef struct
{
    int         count;          /* # of duplicates */
    int         first;          /* values[] index of first occurrence */
} ScalarMCVItem;

typedef struct
{
    FmgrInfo   *cmpFn;
    int         cmpFlags;
    int        *tupnoLink;
} CompareScalarsContext;

static void compute_minimal_stats(VacAttrStatsP stats,
                      AnalyzeAttrFetchFunc fetchfunc,
                      int samplerows,
                      double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
                     AnalyzeAttrFetchFunc fetchfunc,
                     int samplerows,
                     double totalrows);
static int  compare_scalars(const void *a, const void *b, void *arg);
static int  compare_mcvs(const void *a, const void *b);

/*
 * std_typanalyze -- the default type-specific typanalyze function
 */
static bool
std_typanalyze(VacAttrStats *stats)
{
    Form_pg_attribute attr = stats->attr;
    Oid         ltopr;
    Oid         eqopr;
    StdAnalyzeData *mystats;

    /* If the attstattarget column is negative, use the default value */
    /* NB: it is okay to scribble on stats->attr since it's a copy */
    if (attr->attstattarget < 0)
        attr->attstattarget = default_statistics_target;

    /* Look for default "<" and "=" operators for column's type */
    get_sort_group_operators(attr->atttypid,
                             false, false, false,
                             &ltopr, &eqopr, NULL);

    /* If column has no "=" operator, we can't do much of anything */
    if (!OidIsValid(eqopr))
        return false;

    /* Save the operator info for compute_stats routines */
    mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
    mystats->eqopr = eqopr;
    mystats->eqfunc = get_opcode(eqopr);
    mystats->ltopr = ltopr;
    stats->extra_data = mystats;

    /*
     * Determine which standard statistics algorithm to use
     */
    if (OidIsValid(ltopr))
    {
        /* Seems to be a scalar datatype */
        stats->compute_stats = compute_scalar_stats;
        /*--------------------
         * The following choice of minrows is based on the paper
         * "Random sampling for histogram construction: how much is enough?"
         * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
         * Proceedings of ACM SIGMOD International Conference on Management
         * of Data, 1998, Pages 436-447.  Their Corollary 1 to Theorem 5
         * says that for table size n, histogram size k, maximum relative
         * error in bin size f, and error probability gamma, the minimum
         * random sample size is
         *		r = 4 * k * ln(2*n/gamma) / f^2
         * Taking f = 0.5, gamma = 0.01, n = 1 million rows, we obtain
         *		r = 305.82 * k
         * Note that because of the log function, the dependence on n is
         * quite weak; even at n = 1 billion, a 300*k sample gives <= 0.59
         * bin size error with probability 0.99.  So there's no real need to
         * scale for n, which is a good thing because we don't necessarily
         * know it at this point.
         *--------------------
         */
        stats->minrows = 300 * attr->attstattarget;
    }
    else
    {
        /* Can't do much but the minimal stuff */
        stats->compute_stats = compute_minimal_stats;
        /* Might as well use the same minrows as above */
        stats->minrows = 300 * attr->attstattarget;
    }

    return true;
}
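
/*
 * For scale: the default_statistics_target of 10 declared above yields
 * minrows = 300 * 10 = 3000 sampled rows; a (hypothetical) per-column
 * target of 100 would request 300 * 100 = 30000 rows.
 */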

/*
 *	compute_minimal_stats() -- compute minimal column statistics
 *
 *	We use this when we can find only an "=" operator for the datatype.
 *
 *	We determine the fraction of non-null rows, the average width, the
 *	most common values, and the (estimated) number of distinct values.
 *
 *	The most common values are determined by brute force: we keep a list
 *	of previously seen values, ordered by number of times seen, as we scan
 *	the samples.  A newly seen value is inserted just after the last
 *	multiply-seen value, causing the bottommost (oldest) singly-seen value
 *	to drop off the list.  The accuracy of this method, and also its cost,
 *	depend mainly on the length of the list we are willing to keep.
 */
static void
compute_minimal_stats(VacAttrStatsP stats,
                      AnalyzeAttrFetchFunc fetchfunc,
                      int samplerows,
                      double totalrows)
{
    int         nonnull_cnt = 0;
    int         toowide_cnt = 0;
    double      total_width = 0;
    bool        is_varlena = (!stats->attr->attbyval &&
                              stats->attr->attlen == -1);
    bool        is_varwidth = (!stats->attr->attbyval &&
                               stats->attr->attlen < 0);
    int         num_mcv = stats->attr->attstattarget;
    StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;

    /*
     * We track up to 2*n values for an n-element MCV list; but at least 10
     */
    track_max = 2 * num_mcv;
    if (track_max < 10)
        track_max = 10;
    track = (TrackItem *) palloc(track_max * sizeof(TrackItem));

    fmgr_info(mystats->eqfunc, &f_cmpeq);

    for (i = 0; i < samplerows; i++)
    {
        vacuum_delay_point();

        value = fetchfunc(stats, i, &isnull);

        /* Check for null/nonnull */
        if (isnull)
        {
            null_cnt++;
            continue;
        }
        nonnull_cnt++;

        /*
         * If it's a variable-width field, add up widths for average width
         * calculation.  Note that if the value is toasted, we use the toasted
         * width.  We don't bother with this calculation if it's a fixed-width
         * type.
         */
        if (is_varlena)
        {
            total_width += VARSIZE_ANY(DatumGetPointer(value));

            /*
             * If the value is toasted, we want to detoast it just once to
             * avoid repeated detoastings and resultant excess memory usage
             * during the comparisons.  Also, check to see if the value is
             * excessively wide, and if so don't detoast at all --- just
             * ignore it.
             */
            if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
            {
                toowide_cnt++;
                continue;
            }
            value = PointerGetDatum(PG_DETOAST_DATUM(value));
        }
        else if (is_varwidth)
        {
            /* must be cstring */
            total_width += strlen(DatumGetCString(value)) + 1;
        }

        /*
         * See if the value matches anything we're already tracking.
         */
        match = false;
        firstcount1 = track_cnt;
        for (j = 0; j < track_cnt; j++)
        {
            if (DatumGetBool(FunctionCall2(&f_cmpeq, value, track[j].value)))
            {
                match = true;
                break;
            }
            if (j < firstcount1 && track[j].count == 1)
                firstcount1 = j;
        }

        if (match)
        {
            /* Found a match */
            track[j].count++;
            /* This value may now need to "bubble up" in the track list */
            while (j > 0 && track[j].count > track[j - 1].count)
            {
                swapDatum(track[j].value, track[j - 1].value);
                swapInt(track[j].count, track[j - 1].count);
                j--;
            }
        }
        else
        {
            /* No match.  Insert at head of count-1 list */
            if (track_cnt < track_max)
                track_cnt++;
            for (j = track_cnt - 1; j > firstcount1; j--)
            {
                track[j].value = track[j - 1].value;
                track[j].count = track[j - 1].count;
            }
            if (firstcount1 < track_cnt)
            {
                track[firstcount1].value = value;
                track[firstcount1].count = 1;
            }
        }
    }

    /* We can only compute real stats if we found some non-null values. */
    if (nonnull_cnt > 0)
    {
        stats->stats_valid = true;
        /* Do the simple null-frac and width stats */
        stats->stanullfrac = (double) null_cnt / (double) samplerows;
        if (is_varwidth)
            stats->stawidth = total_width / (double) nonnull_cnt;
        else
            stats->stawidth = stats->attrtype->typlen;

        /* Count the number of values we found multiple times */
        summultiple = 0;
        for (nmultiple = 0; nmultiple < track_cnt; nmultiple++)
        {
            if (track[nmultiple].count == 1)
                break;
            summultiple += track[nmultiple].count;
        }

        if (nmultiple == 0)
        {
            /* If we found no repeated values, assume it's a unique column */
            stats->stadistinct = -1.0;
        }
        else if (track_cnt < track_max && toowide_cnt == 0 &&
                 nmultiple == track_cnt)
        {
            /*
             * Our track list includes every value in the sample, and every
             * value appeared more than once.  Assume the column has just
             * these values.
             */
            stats->stadistinct = track_cnt;
        }
        else
        {
            /*----------
             * Estimate the number of distinct values using the estimator
             * proposed by Haas and Stokes in IBM Research Report RJ 10025:
             *		n*d / (n - f1 + f1*n/N)
             * where f1 is the number of distinct values that occurred
             * exactly once in our sample of n rows (from a total of N),
             * and d is the total number of distinct values in the sample.
             * This is their Duj1 estimator; the other estimators they
             * recommend are considerably more complex, and are numerically
             * very unstable when n is much smaller than N.
             *
             * We assume (not very reliably!) that all the multiply-occurring
             * values are reflected in the final track[] list, and the other
             * nonnull values all appeared but once.  (XXX this usually
             * results in a drastic overestimate of ndistinct.  Can we do
             * any better?)
             *----------
             */
            int         f1 = nonnull_cnt - summultiple;
            int         d = f1 + nmultiple;

            numer = (double) samplerows *(double) d;

            denom = (double) (samplerows - f1) +
                (double) f1 *(double) samplerows / totalrows;

            stadistinct = numer / denom;
            /* Clamp to sane range in case of roundoff error */
            if (stadistinct < (double) d)
                stadistinct = (double) d;
            if (stadistinct > totalrows)
                stadistinct = totalrows;
            stats->stadistinct = floor(stadistinct + 0.5);
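
            /*
             * Hypothetical illustration of the Duj1 estimate: with
             * samplerows = 30000, totalrows = 1000000, d = 1000 distinct
             * values of which f1 = 800 were seen exactly once, the formula
             * gives 30000*1000 / (30000 - 800 + 800*30000/1000000)
             * = 30000000 / 29224, i.e. about 1027 distinct values.
             */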
        }

        /*
         * If we estimated the number of distinct values at more than 10% of
         * the total row count (a very arbitrary limit), then assume that
         * stadistinct should scale with the row count rather than be a fixed
         * value.
         */
        if (stats->stadistinct > 0.1 * totalrows)
            stats->stadistinct = -(stats->stadistinct / totalrows);

        /*
         * Decide how many values are worth storing as most-common values. If
         * we are able to generate a complete MCV list (all the values in the
         * sample will fit, and we think these are all the ones in the table),
         * then do so.  Otherwise, store only those values that are
         * significantly more common than the (estimated) average. We set the
         * threshold rather arbitrarily at 25% more than average, with at
         * least 2 instances in the sample.
         */
        if (track_cnt < track_max && toowide_cnt == 0 &&
            stats->stadistinct > 0 &&
            track_cnt <= num_mcv)
        {
            /* Track list includes all values seen, and all will fit */
            num_mcv = track_cnt;
        }
        else
        {
            double      ndistinct = stats->stadistinct;

            if (ndistinct < 0)
                ndistinct = -ndistinct * totalrows;
            /* estimate # of occurrences in sample of a typical value */
            avgcount = (double) samplerows / ndistinct;
            /* set minimum threshold count to store a value */
            mincount = avgcount * 1.25;
            if (mincount < 2)
                mincount = 2;
            if (num_mcv > track_cnt)
                num_mcv = track_cnt;
            for (i = 0; i < num_mcv; i++)
            {
                if (track[i].count < mincount)
                {
                    num_mcv = i;
                    break;
                }
            }
        }

        /* Generate MCV slot entry */
        if (num_mcv > 0)
        {
            MemoryContext old_context;

            /* Must copy the target values into anl_context */
            old_context = MemoryContextSwitchTo(stats->anl_context);
            mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
            mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
            for (i = 0; i < num_mcv; i++)
            {
                mcv_values[i] = datumCopy(track[i].value,
                                          stats->attr->attbyval,
                                          stats->attr->attlen);
                mcv_freqs[i] = (double) track[i].count / (double) samplerows;
            }
            MemoryContextSwitchTo(old_context);

            stats->stakind[0] = STATISTIC_KIND_MCV;
            stats->staop[0] = mystats->eqopr;
            stats->stanumbers[0] = mcv_freqs;
            stats->numnumbers[0] = num_mcv;
            stats->stavalues[0] = mcv_values;
            stats->numvalues[0] = num_mcv;

            /*
             * Accept the defaults for stats->statypid and others.
             * They have been set before we were called (see vacuum.h)
             */
        }
    }
    else if (null_cnt > 0)
    {
        /* We found only nulls; assume the column is entirely null */
        stats->stats_valid = true;
        stats->stanullfrac = 1.0;
        if (is_varwidth)
            stats->stawidth = 0;    /* "unknown" */
        else
            stats->stawidth = stats->attrtype->typlen;
        stats->stadistinct = 0.0;   /* "unknown" */
    }

    /* We don't need to bother cleaning up any of our temporary palloc's */
}

/*
 *	compute_scalar_stats() -- compute column statistics
 *
 *	We use this when we can find "=" and "<" operators for the datatype.
 *
 *	We determine the fraction of non-null rows, the average width, the
 *	most common values, the (estimated) number of distinct values, the
 *	distribution histogram, and the correlation of physical to logical order.
 *
 *	The desired stats can be determined fairly easily after sorting the
 *	data values into order.
 */
static void
compute_scalar_stats(VacAttrStatsP stats,
                     AnalyzeAttrFetchFunc fetchfunc,
                     int samplerows,
                     double totalrows)
{
    int         nonnull_cnt = 0;
    int         toowide_cnt = 0;
    double      total_width = 0;
    bool        is_varlena = (!stats->attr->attbyval &&
                              stats->attr->attlen == -1);
    bool        is_varwidth = (!stats->attr->attbyval &&
                               stats->attr->attlen < 0);
    ScalarMCVItem *track;
    int         num_mcv = stats->attr->attstattarget;
    int         num_bins = stats->attr->attstattarget;
    StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;

    values = (ScalarItem *) palloc(samplerows * sizeof(ScalarItem));
    tupnoLink = (int *) palloc(samplerows * sizeof(int));
    track = (ScalarMCVItem *) palloc(num_mcv * sizeof(ScalarMCVItem));

    SelectSortFunction(mystats->ltopr, false, &cmpFn, &cmpFlags);
    fmgr_info(cmpFn, &f_cmpfn);

    /* Initial scan to find sortable values */
    for (i = 0; i < samplerows; i++)
    {
        vacuum_delay_point();

        value = fetchfunc(stats, i, &isnull);

        /* Check for null/nonnull */
        if (isnull)
        {
            null_cnt++;
            continue;
        }
        nonnull_cnt++;

        /*
         * If it's a variable-width field, add up widths for average width
         * calculation.  Note that if the value is toasted, we use the toasted
         * width.  We don't bother with this calculation if it's a fixed-width
         * type.
         */
        if (is_varlena)
        {
            total_width += VARSIZE_ANY(DatumGetPointer(value));

            /*
             * If the value is toasted, we want to detoast it just once to
             * avoid repeated detoastings and resultant excess memory usage
             * during the comparisons.  Also, check to see if the value is
             * excessively wide, and if so don't detoast at all --- just
             * ignore it.
             */
            if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
            {
                toowide_cnt++;
                continue;
            }
            value = PointerGetDatum(PG_DETOAST_DATUM(value));
        }
        else if (is_varwidth)
        {
            /* must be cstring */
            total_width += strlen(DatumGetCString(value)) + 1;
        }

        /* Add it to the list to be sorted */
        values[values_cnt].value = value;
        values[values_cnt].tupno = values_cnt;
        tupnoLink[values_cnt] = values_cnt;
        values_cnt++;
    }

    /* We can only compute real stats if we found some sortable values. */
    if (values_cnt > 0)
    {
        int         ndistinct,  /* # distinct values in sample */
                    nmultiple,  /* # that appear multiple times */
                    num_hist,
                    dups_cnt;
        int         slot_idx = 0;
        CompareScalarsContext cxt;

        /* Sort the collected values */
        cxt.cmpFn = &f_cmpfn;
        cxt.cmpFlags = cmpFlags;
        cxt.tupnoLink = tupnoLink;
        qsort_arg((void *) values, values_cnt, sizeof(ScalarItem),
                  compare_scalars, (void *) &cxt);

        /*
         * Now scan the values in order, find the most common ones, and also
         * accumulate ordering-correlation statistics.
         *
         * To determine which are most common, we first have to count the
         * number of duplicates of each value.  The duplicates are adjacent in
         * the sorted list, so a brute-force approach is to compare successive
         * datum values until we find two that are not equal.  However, that
         * requires N-1 invocations of the datum comparison routine, which are
         * completely redundant with work that was done during the sort.  (The
         * sort algorithm must at some point have compared each pair of items
         * that are adjacent in the sorted order; otherwise it could not know
         * that it's ordered the pair correctly.)  We exploit this by having
         * compare_scalars remember the highest tupno index that each
         * ScalarItem has been found equal to.  At the end of the sort, a
         * ScalarItem's tupnoLink will still point to itself if and only if it
         * is the last item of its group of duplicates (since the group will
         * be ordered by tupno).
         */
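
        /*
         * Small example of the tupnoLink convention (hypothetical data): if a
         * value occurs at tupnos 2, 5 and 9, the sort leaves tupnoLink[2] and
         * tupnoLink[5] pointing at a higher member of the group, while
         * tupnoLink[9] == 9.  The scan below therefore detects the end of
         * each duplicate group exactly once, with dups_cnt holding the
         * group's size.
         */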
        corr_xysum = 0;
        ndistinct = 0;
        nmultiple = 0;
        dups_cnt = 0;
        for (i = 0; i < values_cnt; i++)
        {
            int         tupno = values[i].tupno;

            corr_xysum += ((double) i) * ((double) tupno);
            dups_cnt++;
            if (tupnoLink[tupno] == tupno)
            {
                /* Reached end of duplicates of this value */
                ndistinct++;
                if (dups_cnt > 1)
                {
                    nmultiple++;
                    if (track_cnt < num_mcv ||
                        dups_cnt > track[track_cnt - 1].count)
                    {
                        /*
                         * Found a new item for the mcv list; find its
                         * position, bubbling down old items if needed. Loop
                         * invariant is that j points at an empty/ replaceable
                         * slot.
                         */
                        int         j;

                        if (track_cnt < num_mcv)
                            track_cnt++;
                        for (j = track_cnt - 1; j > 0; j--)
                        {
                            if (dups_cnt <= track[j - 1].count)
                                break;
                            track[j].count = track[j - 1].count;
                            track[j].first = track[j - 1].first;
                        }
                        track[j].count = dups_cnt;
                        track[j].first = i + 1 - dups_cnt;
                    }
                }
                dups_cnt = 0;
            }
        }
2066 stats
->stats_valid
= true;
2067 /* Do the simple null-frac and width stats */
2068 stats
->stanullfrac
= (double) null_cnt
/ (double) samplerows
;
2070 stats
->stawidth
= total_width
/ (double) nonnull_cnt
;
2072 stats
->stawidth
= stats
->attrtype
->typlen
;
2076 /* If we found no repeated values, assume it's a unique column */
2077 stats
->stadistinct
= -1.0;
2079 else if (toowide_cnt
== 0 && nmultiple
== ndistinct
)
2082 * Every value in the sample appeared more than once. Assume the
2083 * column has just these values.
2085 stats
->stadistinct
= ndistinct
;
2090 * Estimate the number of distinct values using the estimator
2091 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
2092 * n*d / (n - f1 + f1*n/N)
2093 * where f1 is the number of distinct values that occurred
2094 * exactly once in our sample of n rows (from a total of N),
2095 * and d is the total number of distinct values in the sample.
2096 * This is their Duj1 estimator; the other estimators they
2097 * recommend are considerably more complex, and are numerically
2098 * very unstable when n is much smaller than N.
2100 * Overwidth values are assumed to have been distinct.
2103 int f1
= ndistinct
- nmultiple
+ toowide_cnt
;
2104 int d
= f1
+ nmultiple
;
2109 numer
= (double) samplerows
*(double) d
;
2111 denom
= (double) (samplerows
- f1
) +
2112 (double) f1
*(double) samplerows
/ totalrows
;
2114 stadistinct
= numer
/ denom
;
2115 /* Clamp to sane range in case of roundoff error */
2116 if (stadistinct
< (double) d
)
2117 stadistinct
= (double) d
;
2118 if (stadistinct
> totalrows
)
2119 stadistinct
= totalrows
;
2120 stats
->stadistinct
= floor(stadistinct
+ 0.5);
    /*
     * If we estimated the number of distinct values at more than 10% of
     * the total row count (a very arbitrary limit), then assume that
     * stadistinct should scale with the row count rather than be a fixed
     * value.
     */
    if (stats->stadistinct > 0.1 * totalrows)
        stats->stadistinct = -(stats->stadistinct / totalrows);

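    /*
     * A negative stadistinct is stored as minus the fraction of rows that
     * are distinct; e.g. (hypothetical numbers) an estimate of 200000
     * distinct values in a 1000000-row table exceeds the 10% limit and is
     * recorded as -0.2.
     */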
    /*
     * Decide how many values are worth storing as most-common values. If
     * we are able to generate a complete MCV list (all the values in the
     * sample will fit, and we think these are all the ones in the table),
     * then do so.  Otherwise, store only those values that are
     * significantly more common than the (estimated) average. We set the
     * threshold rather arbitrarily at 25% more than average, with at
     * least 2 instances in the sample.  Also, we won't suppress values
     * that have a frequency of at least 1/K where K is the intended
     * number of histogram bins; such values might otherwise cause us to
     * emit duplicate histogram bin boundaries.
     */
    if (track_cnt == ndistinct && toowide_cnt == 0 &&
        stats->stadistinct > 0 &&
        track_cnt <= num_mcv)
    {
        /* Track list includes all values seen, and all will fit */
        num_mcv = track_cnt;
    }
    else
    {
        double      ndistinct = stats->stadistinct;
        double      avgcount,
                    mincount,
                    maxmincount;

        if (ndistinct < 0)
            ndistinct = -ndistinct * totalrows;
        /* estimate # of occurrences in sample of a typical value */
        avgcount = (double) samplerows / ndistinct;
        /* set minimum threshold count to store a value */
        mincount = avgcount * 1.25;
        if (mincount < 2)
            mincount = 2;
        /* don't let threshold exceed 1/K, however */
        maxmincount = (double) samplerows / (double) num_bins;
        if (mincount > maxmincount)
            mincount = maxmincount;
        if (num_mcv > track_cnt)
            num_mcv = track_cnt;
        for (i = 0; i < num_mcv; i++)
        {
            if (track[i].count < mincount)
            {
                num_mcv = i;
                break;
            }
        }
    }

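    /*
     * Illustration of the threshold arithmetic above (hypothetical
     * numbers): with samplerows = 30000, an absolute distinct-value
     * estimate of 1000, and num_bins = 100, avgcount is 30, mincount is
     * 37.5, and maxmincount is 300; a tracked value therefore needs at
     * least 38 occurrences in the sample to stay in the MCV list.
     */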
    /* Generate MCV slot entry */
    if (num_mcv > 0)
    {
        MemoryContext old_context;
        Datum      *mcv_values;
        float4     *mcv_freqs;

        /* Must copy the target values into anl_context */
        old_context = MemoryContextSwitchTo(stats->anl_context);
        mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
        mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
        for (i = 0; i < num_mcv; i++)
        {
            mcv_values[i] = datumCopy(values[track[i].first].value,
                                      stats->attr->attbyval,
                                      stats->attr->attlen);
            mcv_freqs[i] = (double) track[i].count / (double) samplerows;
        }
        MemoryContextSwitchTo(old_context);

        stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
        stats->staop[slot_idx] = mystats->eqopr;
        stats->stanumbers[slot_idx] = mcv_freqs;
        stats->numnumbers[slot_idx] = num_mcv;
        stats->stavalues[slot_idx] = mcv_values;
        stats->numvalues[slot_idx] = num_mcv;

        /*
         * Accept the defaults for stats->statypid and others.
         * They have been set before we were called (see vacuum.h)
         */
        slot_idx++;
    }

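    /*
     * Note that the MCV frequencies stored above are sample fractions;
     * e.g. (hypothetical numbers) a value seen 600 times in a 30000-row
     * sample is recorded with frequency 600/30000 = 0.02.
     */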
    /*
     * Generate a histogram slot entry if there are at least two distinct
     * values not accounted for in the MCV list.  (This ensures the
     * histogram won't collapse to empty or a singleton.)
     */
    num_hist = ndistinct - num_mcv;
    if (num_hist > num_bins)
        num_hist = num_bins + 1;

    if (num_hist >= 2)
    {
        MemoryContext old_context;
        Datum      *hist_values;
        int         nvals;

        /* Sort the MCV items into position order to speed next loop */
        qsort((void *) track, num_mcv,
              sizeof(ScalarMCVItem), compare_mcvs);

        /*
         * Collapse out the MCV items from the values[] array.
         *
         * Note we destroy the values[] array here... but we don't need it
         * for anything more.  We do, however, still need values_cnt.
         * nvals will be the number of remaining entries in values[].
         */
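        /*
         * For illustration (hypothetical numbers): with values_cnt = 10
         * and MCV items covering positions 2-4 and 7-8, the loop below
         * copies entries 0-1, 5-6, and 9 down to the front of values[],
         * leaving nvals = 5 non-MCV entries for the histogram.
         */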
        if (num_mcv > 0)
        {
            int         src,
                        dest;
            int         j;

            src = dest = 0;
            j = 0;              /* index of next interesting MCV item */
            while (src < values_cnt)
            {
                int         ncopy;

                if (j < num_mcv)
                {
                    int         first = track[j].first;

                    if (src >= first)
                    {
                        /* advance past this MCV item */
                        src = first + track[j].count;
                        j++;
                        continue;
                    }
                    ncopy = first - src;
                }
                else
                    ncopy = values_cnt - src;
                memmove(&values[dest], &values[src],
                        ncopy * sizeof(ScalarItem));
                src += ncopy;
                dest += ncopy;
            }
            nvals = dest;
        }
        else
            nvals = values_cnt;
        Assert(nvals >= num_hist);

        /* Must copy the target values into anl_context */
        old_context = MemoryContextSwitchTo(stats->anl_context);
        hist_values = (Datum *) palloc(num_hist * sizeof(Datum));
        for (i = 0; i < num_hist; i++)
        {
            int         pos;

            pos = (i * (nvals - 1)) / (num_hist - 1);
            hist_values[i] = datumCopy(values[pos].value,
                                       stats->attr->attbyval,
                                       stats->attr->attlen);
        }
        MemoryContextSwitchTo(old_context);

        stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
        stats->staop[slot_idx] = mystats->ltopr;
        stats->stavalues[slot_idx] = hist_values;
        stats->numvalues[slot_idx] = num_hist;

        /*
         * Accept the defaults for stats->statypid and others.
         * They have been set before we were called (see vacuum.h)
         */
        slot_idx++;
    }

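    /*
     * Illustration of the boundary-position arithmetic in the histogram
     * loop above (hypothetical numbers): with nvals = 91 remaining values
     * and num_hist = 10, pos takes the values 0, 10, 20, ..., 90, so the
     * ten boundaries split the sorted non-MCV values into nine
     * equal-width bins.
     */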
    /* Generate a correlation entry if there are multiple values */
    if (values_cnt > 1)
    {
        MemoryContext old_context;
        float4     *corrs;
        double      corr_xsum,
                    corr_x2sum;

        /* Must copy the target values into anl_context */
        old_context = MemoryContextSwitchTo(stats->anl_context);
        corrs = (float4 *) palloc(sizeof(float4));
        MemoryContextSwitchTo(old_context);

        /*----------
         * Since we know the x and y value sets are both
         *      0, 1, ..., values_cnt-1,
         * we have sum(x) = sum(y) =
         *      (values_cnt-1)*values_cnt / 2
         * and sum(x^2) = sum(y^2) =
         *      (values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
         *----------
         */
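        /*
         * For reference, Pearson's r is
         *      (n*sum(xy) - sum(x)*sum(y)) /
         *          sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2)).
         * Because sum(x) = sum(y) and sum(x^2) = sum(y^2) here, the two
         * factors under the square root are equal, the sqrt drops out,
         * and the formula reduces to the expression computed below.
         */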
        corr_xsum = ((double) (values_cnt - 1)) *
            ((double) values_cnt) / 2.0;
        corr_x2sum = ((double) (values_cnt - 1)) *
            ((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;

        /* And the correlation coefficient reduces to */
        corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
            (values_cnt * corr_x2sum - corr_xsum * corr_xsum);

        stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
        stats->staop[slot_idx] = mystats->ltopr;
        stats->stanumbers[slot_idx] = corrs;
        stats->numnumbers[slot_idx] = 1;
        slot_idx++;
    }
    }
    else if (nonnull_cnt == 0 && null_cnt > 0)
    {
        /* We found only nulls; assume the column is entirely null */
        stats->stats_valid = true;
        stats->stanullfrac = 1.0;
        if (is_varwidth)
            stats->stawidth = 0;    /* "unknown" */
        else
            stats->stawidth = stats->attrtype->typlen;
        stats->stadistinct = 0.0;   /* "unknown" */
    }

    /* We don't need to bother cleaning up any of our temporary palloc's */
}

/*
 * qsort_arg comparator for sorting ScalarItems
 *
 * Aside from sorting the items, we update the tupnoLink[] array
 * whenever two ScalarItems are found to contain equal datums.  The array
 * is indexed by tupno; for each ScalarItem, it contains the highest
 * tupno that that item's datum has been found to be equal to.  This allows
 * us to avoid additional comparisons in compute_scalar_stats().
 */
static int
compare_scalars(const void *a, const void *b, void *arg)
{
    Datum       da = ((ScalarItem *) a)->value;
    int         ta = ((ScalarItem *) a)->tupno;
    Datum       db = ((ScalarItem *) b)->value;
    int         tb = ((ScalarItem *) b)->tupno;
    CompareScalarsContext *cxt = (CompareScalarsContext *) arg;
    int32       compare;

    compare = ApplySortFunction(cxt->cmpFn, cxt->cmpFlags,
                                da, false, db, false);
    if (compare != 0)
        return compare;

    /*
     * The two datums are equal, so update cxt->tupnoLink[].
     */
    if (cxt->tupnoLink[ta] < tb)
        cxt->tupnoLink[ta] = tb;
    if (cxt->tupnoLink[tb] < ta)
        cxt->tupnoLink[tb] = ta;

    /*
     * For equal datums, sort by tupno
     */
    return ta - tb;
}

/*
 * qsort comparator for sorting ScalarMCVItems by position
 */
static int
compare_mcvs(const void *a, const void *b)
{
    int         da = ((ScalarMCVItem *) a)->first;
    int         db = ((ScalarMCVItem *) b)->first;

    return da - db;
}