/*-------------------------------------------------------------------------
 *
 * ts_typanalyze.c
 *    functions for gathering statistics from tsvector columns
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *    src/backend/tsearch/ts_typanalyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "commands/vacuum.h"
#include "common/hashfn.h"
#include "tsearch/ts_type.h"
#include "utils/builtins.h"
#include "varatt.h"

/* A hash key for lexemes */
typedef struct
{
    char       *lexeme;         /* lexeme (not NULL terminated!) */
    int         length;         /* its length in bytes */
} LexemeHashKey;

/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
    LexemeHashKey key;          /* This is 'e' from the LC algorithm. */
    int         frequency;      /* This is 'f'. */
    int         delta;          /* And this is 'delta'. */
} TrackItem;

static void compute_tsvector_stats(VacAttrStats *stats,
                                   AnalyzeAttrFetchFunc fetchfunc,
                                   int samplerows,
                                   double totalrows);
static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current);
static uint32 lexeme_hash(const void *key, Size keysize);
static int  lexeme_match(const void *key1, const void *key2, Size keysize);
static int  lexeme_compare(const void *key1, const void *key2);
static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2,
                                               void *arg);
static int  trackitem_compare_lexemes(const void *e1, const void *e2,
                                      void *arg);

/*
 *  ts_typanalyze -- a custom typanalyze function for tsvector columns
 */
Datum
ts_typanalyze(PG_FUNCTION_ARGS)
{
    VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);

    /* If the attstattarget column is negative, use the default value */
    if (stats->attstattarget < 0)
        stats->attstattarget = default_statistics_target;

    stats->compute_stats = compute_tsvector_stats;
    /* see comment about the choice of minrows in commands/analyze.c */
    stats->minrows = 300 * stats->attstattarget;

    PG_RETURN_BOOL(true);
}

/*
 *  compute_tsvector_stats() -- compute statistics for a tsvector column
 *
 *  This function computes statistics useful for determining the selectivity
 *  of @@ operations, along with the fraction of non-null rows and the
 *  average width.
 *
 *  Instead of finding the most common values, as we do for most datatypes,
 *  we look for the most common lexemes. This is more useful, because most
 *  probably no two rows will contain the same tsvector, so the notion of an
 *  MCV is a bit bogus for this datatype. With a list of the most common
 *  lexemes we can do a better job of figuring out @@ selectivity.
 *
 *  For the same reason we assume that tsvector columns are unique when
 *  determining the number of distinct values.
 *
 *  The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 *  frequency counts over data streams" by G. S. Manku and R. Motwani, in
 *  Proceedings of the 28th International Conference on Very Large Data Bases,
 *  Hong Kong, China, August 2002, section 4.2. The paper is available at
 *  http://www.vldb.org/conf/2002/S10P03.pdf
 *
 *  The Lossy Counting (aka LC) algorithm goes like this:
 *  Let s be the threshold frequency for an item (the minimum frequency we
 *  are interested in) and epsilon the error margin for the frequency. Let D
 *  be a set of triples (e, f, delta), where e is an element value, f is that
 *  element's frequency (actually, its current occurrence count) and delta is
 *  the maximum error in f. We start with D empty and process the elements in
 *  batches of size w. (The batch size is also known as "bucket size" and is
 *  equal to 1/epsilon.) Let the current batch number be b_current, starting
 *  with 1. For each element e we either increment its f count, if it's
 *  already in D, or insert a new triple into D with values (e, 1, b_current
 *  - 1). After processing each batch we prune D, by removing from it all
 *  elements with f + delta <= b_current. After the algorithm finishes we
 *  suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 *  where N is the total number of elements in the input. We emit the
 *  remaining elements with estimated frequency f/N. The LC paper proves
 *  that this algorithm finds all elements with true frequency at least s,
 *  and that no frequency is overestimated or is underestimated by more than
 *  epsilon. Furthermore, given reasonable assumptions about the input
 *  distribution, the required table size is no more than about 7 times w.
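 *
 *  To make that concrete, here is a toy trace (invented for illustration,
 *  not taken from the paper): let w = 4 and the input be a b a c a d a e.
 *  Processing the first batch (a b a c, b_current = 1) leaves D holding
 *  (a, 2, 0), (b, 1, 0) and (c, 1, 0); pruning removes b and c because
 *  their f + delta <= 1. The second batch (a d a e, b_current = 2) raises
 *  a to (a, 4, 0) and inserts (d, 1, 1) and (e, 1, 1); pruning again
 *  removes d and e, so only the genuinely frequent element a survives.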
 *
 *  We set s to be the estimated frequency of the K'th word in a natural
 *  language's frequency table, where K is the target number of entries in
 *  the MCELEM array plus an arbitrary constant, meant to reflect the fact
 *  that the most common words in any language would usually be stopwords
 *  so we will not actually see them in the input. We assume that the
 *  distribution of word frequencies (including the stopwords) follows Zipf's
 *  law with an exponent of 1.
 *
 *  Assuming a Zipfian distribution, the frequency of the K'th word is equal
 *  to 1/(K * H(W)), where H(n) is 1 + 1/2 + 1/3 + ... + 1/n and W is the
 *  number of words in the language. Putting W at one million, we get roughly
 *  0.07/K. Assuming the top 10 words are stopwords gives s = 0.07/(K + 10).
 *  We set epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and a
 *  maximum expected hashtable size of about 1000 * (K + 10).
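 *
 *  (Illustrative numbers: with the default statistics target of 100 we
 *  track K = 1000 entries, hence s = 0.07/1010 =~ 6.9e-5, epsilon =~
 *  6.9e-6, and an expected hashtable size of at most about 1.01 million
 *  entries.)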
 *
 *  Note: in the above discussion, s, epsilon, and f/N are in terms of a
 *  lexeme's frequency as a fraction of all lexemes seen in the input.
 *  However, what we actually want to store in the finished pg_statistic
 *  entry is each lexeme's frequency as a fraction of all rows that it occurs
 *  in. Assuming that the input tsvectors are correctly constructed, no
 *  lexeme occurs more than once per tsvector, so the final count f is a
 *  correct estimate of the number of input tsvectors it occurs in, and we
 *  need only change the divisor from N to nonnull_cnt to get the number we
 *  want.
 */
static void
compute_tsvector_stats(VacAttrStats *stats,
                       AnalyzeAttrFetchFunc fetchfunc,
                       int samplerows,
                       double totalrows)
{
    int         num_mcelem;
    int         null_cnt = 0;
    double      total_width = 0;

    /* This is D from the LC algorithm. */
    HTAB       *lexemes_tab;
    HASHCTL     hash_ctl;
    HASH_SEQ_STATUS scan_status;

    /* This is the current bucket number from the LC algorithm */
    int         b_current;

    /* This is 'w' from the LC algorithm */
    int         bucket_width;
    int         vector_no,
                lexeme_no;
    LexemeHashKey hash_key;

    /*
     * We want statistics_target * 10 lexemes in the MCELEM array. This
     * multiplier is pretty arbitrary, but is meant to reflect the fact that
     * the number of individual lexeme values tracked in pg_statistic ought
     * to be more than the number of values for a simple scalar column.
     */
    num_mcelem = stats->attstattarget * 10;

    /*
     * We set bucket width equal to (num_mcelem + 10) / 0.007 as per the
     * comment above.
     */
    bucket_width = (num_mcelem + 10) * 1000 / 7;
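
    /*
     * (Multiplying by 1000 and dividing by 7 is just (num_mcelem + 10) /
     * 0.007 carried out in integer arithmetic; e.g. the default num_mcelem
     * of 1000 yields a bucket width of 144285.)
     */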

    /*
     * Create the hashtable. It will be in local memory, so we don't need to
     * worry about overflowing the initial size. Also we don't need to pay
     * any attention to locking and memory management.
     */
    hash_ctl.keysize = sizeof(LexemeHashKey);
    hash_ctl.entrysize = sizeof(TrackItem);
    hash_ctl.hash = lexeme_hash;
    hash_ctl.match = lexeme_match;
    hash_ctl.hcxt = CurrentMemoryContext;
    lexemes_tab = hash_create("Analyzed lexemes table",
                              num_mcelem,
                              &hash_ctl,
                              HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

    /* Initialize counters. */
    b_current = 1;
    lexeme_no = 0;

    /* Loop over the tsvectors. */
    for (vector_no = 0; vector_no < samplerows; vector_no++)
    {
        Datum       value;
        bool        isnull;
        TSVector    vector;
        WordEntry  *curentryptr;
        char       *lexemesptr;
        int         j;

        vacuum_delay_point();

        value = fetchfunc(stats, vector_no, &isnull);

        /*
         * Check for null/nonnull.
         */
        if (isnull)
        {
            null_cnt++;
            continue;
        }

        /*
         * Add up widths for average-width calculation. Since it's a
         * tsvector, we know it's varlena. As in the regular
         * compute_minimal_stats function, we use the toasted width for this
         * calculation.
         */
        total_width += VARSIZE_ANY(DatumGetPointer(value));

        /*
         * Now detoast the tsvector if needed.
         */
        vector = DatumGetTSVector(value);

        /*
         * We loop through the lexemes in the tsvector and add them to our
         * tracking hashtable.
         */
        lexemesptr = STRPTR(vector);
        curentryptr = ARRPTR(vector);
        for (j = 0; j < vector->size; j++)
        {
            TrackItem  *item;
            bool        found;

            /*
             * Construct a hash key. The key points into the (detoasted)
             * tsvector value at this point, but if a new entry is created,
             * we make a copy of it. This way we can free the tsvector value
             * once we've processed all its lexemes.
             */
            hash_key.lexeme = lexemesptr + curentryptr->pos;
            hash_key.length = curentryptr->len;

            /* Lookup current lexeme in hashtable, adding it if new */
            item = (TrackItem *) hash_search(lexemes_tab,
                                             &hash_key,
                                             HASH_ENTER, &found);

            if (found)
            {
                /* The lexeme is already on the tracking list */
                item->frequency++;
            }
            else
            {
                /* Initialize new tracking list element */
                item->frequency = 1;
                item->delta = b_current - 1;

                item->key.lexeme = palloc(hash_key.length);
                memcpy(item->key.lexeme, hash_key.lexeme, hash_key.length);
            }

            /* lexeme_no is the number of elements processed (ie N) */
            lexeme_no++;

            /* We prune the D structure after processing each bucket */
            if (lexeme_no % bucket_width == 0)
            {
                prune_lexemes_hashtable(lexemes_tab, b_current);
                b_current++;
            }

            /* Advance to the next WordEntry in the tsvector */
            curentryptr++;
        }

        /* If the vector was toasted, free the detoasted copy. */
        if (TSVectorGetDatum(vector) != value)
            pfree(vector);
    }

    /* We can only compute real stats if we found some non-null values. */
    if (null_cnt < samplerows)
    {
        int         nonnull_cnt = samplerows - null_cnt;
        int         i;
        TrackItem **sort_table;
        TrackItem  *item;
        int         track_len;
        int         cutoff_freq;
        int         minfreq,
                    maxfreq;

        stats->stats_valid = true;
        /* Do the simple null-frac and average width stats */
        stats->stanullfrac = (double) null_cnt / (double) samplerows;
        stats->stawidth = total_width / (double) nonnull_cnt;

        /* Assume it's a unique column (see notes above) */
        stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
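
        /*
         * (A negative stadistinct is the pg_statistic convention for "the
         * number of distinct values is this fraction of the row count", so
         * this says every non-null row is assumed to hold a distinct value.)
         */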

        /*
         * Construct an array of the interesting hashtable items, that is,
         * those meeting the cutoff frequency (s - epsilon)*N. Also identify
         * the minimum and maximum frequencies among these items.
         *
         * Since epsilon = s/10 and bucket_width = 1/epsilon, we have
         * s - epsilon = 9*epsilon = 9/bucket_width, so the cutoff frequency
         * is 9*N / bucket_width.
         */
        cutoff_freq = 9 * lexeme_no / bucket_width;

        i = hash_get_num_entries(lexemes_tab); /* surely enough space */
        sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

        hash_seq_init(&scan_status, lexemes_tab);
        track_len = 0;
        minfreq = lexeme_no;
        maxfreq = 0;
        while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
        {
            if (item->frequency > cutoff_freq)
            {
                sort_table[track_len++] = item;
                minfreq = Min(minfreq, item->frequency);
                maxfreq = Max(maxfreq, item->frequency);
            }
        }
        Assert(track_len <= i);

        /* emit some statistics for debug purposes */
        elog(DEBUG3, "tsvector_stats: target # mces = %d, bucket width = %d, "
             "# lexemes = %d, hashtable size = %d, usable entries = %d",
             num_mcelem, bucket_width, lexeme_no, i, track_len);

        /*
         * If we obtained more lexemes than we really want, get rid of those
         * with the least frequencies. The easiest way is to qsort the array
         * into descending frequency order and truncate it.
         */
        if (num_mcelem < track_len)
        {
            qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
                                trackitem_compare_frequencies_desc, NULL);
            /* reset minfreq to the smallest frequency we're keeping */
            minfreq = sort_table[num_mcelem - 1]->frequency;
        }
        else
            num_mcelem = track_len;

        /* Generate MCELEM slot entry */
        if (num_mcelem > 0)
        {
            MemoryContext old_context;
            Datum      *mcelem_values;
            float4     *mcelem_freqs;

            /*
             * We want to store statistics sorted on the lexeme value using
             * first length, then byte-for-byte comparison. The reason for
             * doing length comparison first is that we don't care about the
             * ordering so long as it's consistent, and comparing lengths
             * first gives us a chance to avoid a strncmp() call.
             *
             * This is different from what we do with scalar statistics --
             * they get sorted on frequencies. The rationale is that we
             * usually search through most common elements looking for a
             * specific value, so we can grab its frequency. When values are
             * presorted we can employ binary search for that. See
             * ts_selfuncs.c for a real usage scenario.
             */
            qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
                                trackitem_compare_lexemes, NULL);

            /* Must copy the target values into anl_context */
            old_context = MemoryContextSwitchTo(stats->anl_context);

            /*
             * We sorted statistics on the lexeme value, but we want to be
             * able to find out the minimal and maximal frequency without
             * going through all the values. We keep those two extra
             * frequencies in two extra cells in mcelem_freqs.
             *
             * (Note: the MCELEM statistics slot definition allows for a
             * third extra number containing the frequency of nulls, but we
             * don't create that for a tsvector column, since null elements
             * aren't possible.)
             */
            mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
            mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4));

            /*
             * See comments above about use of nonnull_cnt as the divisor
             * for the final frequency estimates.
             */
            for (i = 0; i < num_mcelem; i++)
            {
                TrackItem  *titem = sort_table[i];

                mcelem_values[i] =
                    PointerGetDatum(cstring_to_text_with_len(titem->key.lexeme,
                                                             titem->key.length));
                mcelem_freqs[i] = (double) titem->frequency / (double) nonnull_cnt;
            }
            mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
            mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt;
            MemoryContextSwitchTo(old_context);

            stats->stakind[0] = STATISTIC_KIND_MCELEM;
            stats->staop[0] = TextEqualOperator;
            stats->stacoll[0] = DEFAULT_COLLATION_OID;
            stats->stanumbers[0] = mcelem_freqs;
            /* See above comment about the two extra frequency fields */
            stats->numnumbers[0] = num_mcelem + 2;
            stats->stavalues[0] = mcelem_values;
            stats->numvalues[0] = num_mcelem;
            /* We are storing text values */
            stats->statypid[0] = TEXTOID;
            stats->statyplen[0] = -1;   /* typlen, -1 for varlena */
            stats->statypbyval[0] = false;
            stats->statypalign[0] = 'i';
        }
    }
    else
    {
        /* We found only nulls; assume the column is entirely null */
        stats->stats_valid = true;
        stats->stanullfrac = 1.0;
        stats->stawidth = 0;    /* "unknown" */
        stats->stadistinct = 0.0;   /* "unknown" */
    }

    /*
     * We don't need to bother cleaning up any of our temporary palloc's. The
     * hashtable should also go away, as it used a child memory context.
     */
}

/*
 *  A function to prune the D structure from the Lossy Counting algorithm.
 *  Consult compute_tsvector_stats() for a wider explanation.
 */
static void
prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
{
    HASH_SEQ_STATUS scan_status;
    TrackItem  *item;

    hash_seq_init(&scan_status, lexemes_tab);
    while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
    {
        if (item->frequency + item->delta <= b_current)
        {
            char       *lexeme = item->key.lexeme;
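
            /*
             * (dynahash explicitly permits deleting the entry most recently
             * returned by hash_seq_search, so removing it mid-scan is safe.)
             */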
            if (hash_search(lexemes_tab, &item->key,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "hash table corrupted");
            pfree(lexeme);
        }
    }
}

/*
 *  The hash function for lexemes. They are strings, but not NULL-terminated,
 *  so we need a special hash function.
 */
static uint32
lexeme_hash(const void *key, Size keysize)
{
    const LexemeHashKey *l = (const LexemeHashKey *) key;

    return DatumGetUInt32(hash_any((const unsigned char *) l->lexeme,
                                   l->length));
}

/*
 *  Matching function for lexemes, to be used in hashtable lookups.
 */
static int
lexeme_match(const void *key1, const void *key2, Size keysize)
{
    /* The keysize parameter is superfluous; the keys store their lengths */
    return lexeme_compare(key1, key2);
}

/*
 *  Comparison function for lexemes.
 */
static int
lexeme_compare(const void *key1, const void *key2)
{
    const LexemeHashKey *d1 = (const LexemeHashKey *) key1;
    const LexemeHashKey *d2 = (const LexemeHashKey *) key2;

    /* First, compare by length */
    if (d1->length > d2->length)
        return 1;
    else if (d1->length < d2->length)
        return -1;
    /* Lengths are equal, do a byte-by-byte comparison */
    return strncmp(d1->lexeme, d2->lexeme, d1->length);
}

/*
 *  Comparator for sorting TrackItems on frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;
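
    /*
     * (Both frequencies are non-negative occurrence counts bounded by the
     * number of lexemes seen, so this int subtraction cannot overflow.)
     */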
    return (*t2)->frequency - (*t1)->frequency;
}

/*
 *  Comparator for sorting TrackItems on lexemes
 */
static int
trackitem_compare_lexemes(const void *e1, const void *e2, void *arg)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return lexeme_compare(&(*t1)->key, &(*t2)->key);
}