/*-------------------------------------------------------------------------
 *
 * ts_typanalyze.c
 *	  functions for gathering statistics from tsvector columns
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "catalog/pg_operator.h"
#include "commands/vacuum.h"
#include "tsearch/ts_type.h"
#include "utils/builtins.h"
#include "utils/hsearch.h"

/* A hash key for lexemes */
typedef struct
{
	char	   *lexeme;			/* lexeme (not NULL terminated!) */
	int			length;			/* its length in bytes */
} LexemeHashKey;

/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
	LexemeHashKey key;			/* This is 'e' from the LC algorithm. */
	int			frequency;		/* This is 'f'. */
	int			delta;			/* And this is 'delta'. */
} TrackItem;
static void compute_tsvector_stats(VacAttrStats *stats,
								   AnalyzeAttrFetchFunc fetchfunc,
								   int samplerows,
								   double totalrows);
static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current);
static uint32 lexeme_hash(const void *key, Size keysize);
static int	lexeme_match(const void *key1, const void *key2, Size keysize);
static int	lexeme_compare(const void *key1, const void *key2);
static int	trackitem_compare_frequencies_desc(const void *e1, const void *e2);
static int	trackitem_compare_lexemes(const void *e1, const void *e2);

/*
 *	ts_typanalyze -- a custom typanalyze function for tsvector columns
 */
Datum
ts_typanalyze(PG_FUNCTION_ARGS)
{
	VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
	Form_pg_attribute attr = stats->attr;

	/* If the attstattarget column is negative, use the default value */
	/* NB: it is okay to scribble on stats->attr since it's a copy */
	if (attr->attstattarget < 0)
		attr->attstattarget = default_statistics_target;

	stats->compute_stats = compute_tsvector_stats;
	/* see comment about the choice of minrows in analyze.c */
	stats->minrows = 300 * attr->attstattarget;

	PG_RETURN_BOOL(true);
}
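
/*
 * Note: ANALYZE reaches this function through pg_type.typanalyze, which for
 * the tsvector type points at ts_typanalyze; returning true tells analyze.c
 * to go ahead and use the compute_stats/minrows settings filled in above.
 */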

/*
 *	compute_tsvector_stats() -- compute statistics for a tsvector column
 *
 *	This function computes statistics useful for determining the selectivity
 *	of @@ operations, along with the fraction of non-null rows and the
 *	average width.
 *
 *	Instead of finding the most common values, as we do for most datatypes,
 *	we're looking for the most common lexemes. This is more useful, because
 *	there most probably won't be any two rows with the same tsvector, and so
 *	the notion of an MCV is a bit bogus for this datatype. With a list of the
 *	most common lexemes we can do a better job of figuring out @@ selectivity.
 *
 *	For the same reason we assume that tsvector columns are unique when
 *	determining the number of distinct values.
 *
 *	The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 *	frequency counts over data streams" by G. S. Manku and R. Motwani, in
 *	Proceedings of the 28th International Conference on Very Large Data Bases,
 *	Hong Kong, China, August 2002, section 4.2. The paper is available at
 *	http://www.vldb.org/conf/2002/S10P03.pdf
 *
 *	The Lossy Counting (aka LC) algorithm goes like this:
 *	Let D be a set of triples (e, f, d), where e is an element value, f is
 *	that element's frequency (occurrence count) and d is the maximum error in
 *	f. We start with D empty and process the elements in batches of size
 *	w. (The batch size is also known as "bucket size".) Let the current batch
 *	number be b_current, starting with 1. For each element e we either
 *	increment its f count, if it's already in D, or insert a new triple into D
 *	with values (e, 1, b_current - 1). After processing each batch we prune D,
 *	by removing from it all elements with f + d <= b_current. Finally, we
 *	gather the elements with the largest f. The LC paper proves error bounds
 *	on f dependent on the batch size w, and shows that the required table
 *	size is no more than a few times w.
 *
 *	We use a hashtable for the D structure and a bucket width of
 *	statistics_target * 100, where 100 is an arbitrarily chosen constant,
 *	meant to approximate the number of lexemes in a single tsvector.
 */
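/*
 * A tiny worked example of the above, with w = 3 and the element stream
 * a b a c b a:
 *
 *	batch 1 (a b a):  D = {(a,2,0), (b,1,0)}; pruning with b_current = 1
 *					  removes (b,1,0), since 1 + 0 <= 1, leaving {(a,2,0)}
 *	batch 2 (c b a):  D = {(a,3,0), (c,1,1), (b,1,1)}; pruning with
 *					  b_current = 2 removes c and b (1 + 1 <= 2),
 *					  leaving {(a,3,0)}
 *
 * so only the genuinely frequent element survives both prunings.
 */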
static void
compute_tsvector_stats(VacAttrStats *stats,
					   AnalyzeAttrFetchFunc fetchfunc,
					   int samplerows,
					   double totalrows)
{
	int			num_mcelem;
	int			null_cnt = 0;
	double		total_width = 0;
	/* This is D from the LC algorithm. */
	HTAB	   *lexemes_tab;
	HASHCTL		hash_ctl;
	HASH_SEQ_STATUS scan_status;
	/* This is the current bucket number from the LC algorithm */
	int			b_current;
	/* This is 'w' from the LC algorithm */
	int			bucket_width;
	int			vector_no,
				lexeme_no;
	LexemeHashKey hash_key;
	TrackItem  *item;

	/* We want statistics_target * 100 lexemes in the MCELEM array */
	num_mcelem = stats->attr->attstattarget * 100;

	/*
	 * We set bucket width equal to the target number of result lexemes.
	 * This is probably about right but perhaps might need to be scaled up
	 * or down a bit?
	 */
	bucket_width = num_mcelem;
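
	/*
	 * With a statistics target of 100, for instance, both num_mcelem and
	 * bucket_width come out to 10000, so D is pruned after every 10000
	 * lexemes processed.
	 */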

	/*
	 * Create the hashtable. It will be in local memory, so we don't need to
	 * worry about initial size too much. Also we don't need to pay any
	 * attention to locking and memory management.
	 */
	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
	hash_ctl.keysize = sizeof(LexemeHashKey);
	hash_ctl.entrysize = sizeof(TrackItem);
	hash_ctl.hash = lexeme_hash;
	hash_ctl.match = lexeme_match;
	hash_ctl.hcxt = CurrentMemoryContext;
	lexemes_tab = hash_create("Analyzed lexemes table",
							  bucket_width * 4,
							  &hash_ctl,
							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
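
	/*
	 * The nelem hint of bucket_width * 4 matches the LC paper's observation,
	 * noted above, that the table needs no more than a few times w entries.
	 */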

	/* Initialize counters. */
	b_current = 1;
	lexeme_no = 1;

	/* Loop over the tsvectors. */
	for (vector_no = 0; vector_no < samplerows; vector_no++)
	{
		Datum		value;
		bool		isnull;
		TSVector	vector;
		WordEntry  *curentryptr;
		char	   *lexemesptr;
		int			j;

		vacuum_delay_point();

		value = fetchfunc(stats, vector_no, &isnull);

		/*
		 * Check for null/nonnull.
		 */
		if (isnull)
		{
			null_cnt++;
			continue;
		}

		/*
		 * Add up widths for average-width calculation. Since it's a
		 * tsvector, we know it's varlena. As in the regular
		 * compute_minimal_stats function, we use the toasted width for this
		 * calculation.
		 */
		total_width += VARSIZE_ANY(DatumGetPointer(value));

		/*
		 * Now detoast the tsvector if needed.
		 */
		vector = DatumGetTSVector(value);

		/*
		 * We loop through the lexemes in the tsvector and add them to our
		 * tracking hashtable. Note: the hashtable entries will point into
		 * the (detoasted) tsvector value, therefore we cannot free that
		 * storage until we're done.
		 */
		lexemesptr = STRPTR(vector);
		curentryptr = ARRPTR(vector);
		for (j = 0; j < vector->size; j++)
		{
			bool		found;

			/* Construct a hash key */
			hash_key.lexeme = lexemesptr + curentryptr->pos;
			hash_key.length = curentryptr->len;

			/* Lookup current lexeme in hashtable, adding it if new */
			item = (TrackItem *) hash_search(lexemes_tab,
											 (const void *) &hash_key,
											 HASH_ENTER, &found);

			if (found)
			{
				/* The lexeme is already on the tracking list */
				item->frequency++;
			}
			else
			{
				/* Initialize new tracking list element */
				item->frequency = 1;
				item->delta = b_current - 1;
			}

			/* We prune the D structure after processing each bucket */
			if (lexeme_no % bucket_width == 0)
			{
				prune_lexemes_hashtable(lexemes_tab, b_current);
				b_current++;
			}

			/* Advance to the next WordEntry in the tsvector */
			lexeme_no++;
			curentryptr++;
		}
	}

	/* We can only compute real stats if we found some non-null values. */
	if (null_cnt < samplerows)
	{
		int			nonnull_cnt = samplerows - null_cnt;
		int			i;
		TrackItem **sort_table;
		int			track_len;
		int			minfreq,
					maxfreq;

		stats->stats_valid = true;
		/* Do the simple null-frac and average width stats */
		stats->stanullfrac = (double) null_cnt / (double) samplerows;
		stats->stawidth = total_width / (double) nonnull_cnt;

		/* Assume it's a unique column (see notes above) */
		stats->stadistinct = -1.0;

		/*
		 * Determine the top-N lexemes by simply copying pointers from the
		 * hashtable into an array and applying qsort()
		 */
		track_len = hash_get_num_entries(lexemes_tab);

		sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * track_len);

		hash_seq_init(&scan_status, lexemes_tab);
		i = 0;
		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
			sort_table[i++] = item;
		Assert(i == track_len);

		qsort(sort_table, track_len, sizeof(TrackItem *),
			  trackitem_compare_frequencies_desc);

		/* Suppress any single-occurrence items */
		while (track_len > 0)
		{
			if (sort_table[track_len - 1]->frequency > 1)
				break;
			track_len--;
		}

		/* Determine the number of most common lexemes to be stored */
		if (num_mcelem > track_len)
			num_mcelem = track_len;

		/*
		 * Grab the minimal and maximal frequencies that will get stored.
		 * Guard against num_mcelem being zero (e.g. every lexeme occurred
		 * only once), which would otherwise read sort_table[-1].
		 */
		minfreq = maxfreq = 0;
		if (num_mcelem > 0)
		{
			minfreq = sort_table[num_mcelem - 1]->frequency;
			maxfreq = sort_table[0]->frequency;
		}

		/*
		 * We want to store statistics sorted on the lexeme value using
		 * first length, then byte-for-byte comparison. The reason for doing
		 * length comparison first is that we don't care about the ordering
		 * so long as it's consistent, and comparing lengths first gives us
		 * a chance to avoid a strncmp() call.
		 *
		 * This is different from what we do with scalar statistics -- they
		 * get sorted on frequencies. The rationale is that we usually
		 * search through most common elements looking for a specific value,
		 * so we can grab its frequency. When the values are presorted we
		 * can employ binary search for that. See ts_selfuncs.c for a real
		 * usage scenario.
		 */
		qsort(sort_table, num_mcelem, sizeof(TrackItem *),
			  trackitem_compare_lexemes);

		/* Generate MCELEM slot entry */
		if (num_mcelem > 0)
		{
			MemoryContext old_context;
			Datum	   *mcelem_values;
			float4	   *mcelem_freqs;

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);

			/*
			 * We sorted the statistics on the lexeme value, but we want to
			 * be able to find out the minimal and maximal frequency without
			 * going through all the values. We keep those two extra
			 * frequencies in two extra cells in mcelem_freqs.
			 */
			mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
			mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4));
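
			/*
			 * Resulting layout: mcelem_freqs[0 .. num_mcelem - 1] hold the
			 * per-lexeme frequencies (matching mcelem_values), while
			 * mcelem_freqs[num_mcelem] and mcelem_freqs[num_mcelem + 1]
			 * hold minfreq and maxfreq respectively.
			 */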

			for (i = 0; i < num_mcelem; i++)
			{
				TrackItem  *item = sort_table[i];

				mcelem_values[i] =
					PointerGetDatum(cstring_to_text_with_len(item->key.lexeme,
															 item->key.length));
				mcelem_freqs[i] = (double) item->frequency / (double) nonnull_cnt;
			}
			mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
			mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt;
			MemoryContextSwitchTo(old_context);

			stats->stakind[0] = STATISTIC_KIND_MCELEM;
			stats->staop[0] = TextEqualOperator;
			stats->stanumbers[0] = mcelem_freqs;
			/* See above comment about the two extra frequency fields */
			stats->numnumbers[0] = num_mcelem + 2;
			stats->stavalues[0] = mcelem_values;
			stats->numvalues[0] = num_mcelem;
			/* We are storing text values */
			stats->statypid[0] = TEXTOID;
			stats->statyplen[0] = -1;	/* typlen, -1 for varlena */
			stats->statypbyval[0] = false;
			stats->statypalign[0] = 'i';
		}
	}
	else
	{
		/* We found only nulls; assume the column is entirely null */
		stats->stats_valid = true;
		stats->stanullfrac = 1.0;
		stats->stawidth = 0;		/* "unknown" */
		stats->stadistinct = 0.0;	/* "unknown" */
	}

	/*
	 * We don't need to bother cleaning up any of our temporary palloc's.
	 * The hashtable should also go away, as it used a child memory context.
	 */
}

/*
 * A function to prune the D structure from the Lossy Counting algorithm.
 * Consult compute_tsvector_stats() for a fuller explanation.
 */
static void
prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
{
	HASH_SEQ_STATUS scan_status;
	TrackItem  *item;

	hash_seq_init(&scan_status, lexemes_tab);
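
	/*
	 * Note: dynahash explicitly allows deleting the entry just returned by
	 * hash_seq_search() while the scan is in progress, so removing items
	 * inside this loop is safe.
	 */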
	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
	{
		if (item->frequency + item->delta <= b_current)
		{
			if (hash_search(lexemes_tab, (const void *) &item->key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "hash table corrupted");
		}
	}
}

/*
 * The hash function for lexemes. They are strings, but not NULL-terminated,
 * so we need a special hash function.
 */
static uint32
lexeme_hash(const void *key, Size keysize)
{
	const LexemeHashKey *l = (const LexemeHashKey *) key;

	return DatumGetUInt32(hash_any((const unsigned char *) l->lexeme,
								   l->length));
}

/*
 * Matching function for lexemes, to be used in hashtable lookups.
 */
static int
lexeme_match(const void *key1, const void *key2, Size keysize)
{
	/* The keysize parameter is superfluous here: the keys store their lengths */
	return lexeme_compare(key1, key2);
}

/*
 * Comparison function for lexemes.
 */
static int
lexeme_compare(const void *key1, const void *key2)
{
	const LexemeHashKey *d1 = (const LexemeHashKey *) key1;
	const LexemeHashKey *d2 = (const LexemeHashKey *) key2;

	/* First, compare by length */
	if (d1->length > d2->length)
		return 1;
	else if (d1->length < d2->length)
		return -1;
	/* Lengths are equal, do a byte-by-byte comparison */
	return strncmp(d1->lexeme, d2->lexeme, d1->length);
}
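
/*
 * Note that this length-first ordering is not alphabetical: "zz" sorts
 * before "aaa", for instance.  That's fine, since the consumers only need
 * a consistent order to binary-search on, as explained above.
 */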

/*
 * qsort() comparator for sorting TrackItems on frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2)
{
	const TrackItem *const *t1 = (const TrackItem *const *) e1;
	const TrackItem *const *t2 = (const TrackItem *const *) e2;
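
	/* Frequencies are nonnegative, so this difference cannot overflow */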
	return (*t2)->frequency - (*t1)->frequency;
}

/*
 * qsort() comparator for sorting TrackItems on lexemes
 */
static int
trackitem_compare_lexemes(const void *e1, const void *e2)
{
	const TrackItem *const *t1 = (const TrackItem *const *) e1;
	const TrackItem *const *t2 = (const TrackItem *const *) e2;

	return lexeme_compare(&(*t1)->key, &(*t2)->key);
}