/*-------------------------------------------------------------------------
 *
 * array_typanalyze.c
 *	  Functions for gathering statistics from array columns
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/adt/array_typanalyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/detoast.h"
#include "commands/vacuum.h"
#include "utils/array.h"
#include "utils/datum.h"
#include "utils/fmgrprotos.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"
/*
 * To avoid consuming too much memory, I/O, and CPU during analysis, and/or
 * too much space in the resulting pg_statistic rows, we ignore arrays that
 * are wider than ARRAY_WIDTH_THRESHOLD (after detoasting!).  Note that this
 * number is considerably more than the similar WIDTH_THRESHOLD limit used
 * in analyze.c's standard typanalyze code.
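 * (For reference, our note: 0x10000 bytes is 64kB, whereas analyze.c's
 * WIDTH_THRESHOLD is 1024 bytes.)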
 */
#define ARRAY_WIDTH_THRESHOLD 0x10000
/* Extra data for compute_array_stats function */
typedef struct
{
	/* Information about array element type */
	Oid			type_id;		/* element type's OID */
	Oid			eq_opr;			/* default equality operator's OID */
	Oid			coll_id;		/* collation to use */
	bool		typbyval;		/* physical properties of element type */
	int16		typlen;
	char		typalign;

	/*
	 * Lookup data for element type's comparison and hash functions (these
	 * are in the type's typcache entry, which we expect to remain valid over
	 * the lifespan of the ANALYZE run)
	 */
	FmgrInfo   *cmp;
	FmgrInfo   *hash;

	/* Saved state from std_typanalyze() */
	AnalyzeAttrComputeStatsFunc std_compute_stats;
	void	   *std_extra_data;
} ArrayAnalyzeExtraData;
/*
 * While compute_array_stats is running, we keep a pointer to the extra data
 * here for use by assorted subroutines.  compute_array_stats doesn't
 * currently need to be re-entrant, so avoiding this is not worth the extra
 * notational cruft that would be needed.
 */
static ArrayAnalyzeExtraData *array_extra_data;
/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
	Datum		key;			/* This is 'e' from the LC algorithm. */
	int			frequency;		/* This is 'f'. */
	int			delta;			/* And this is 'delta'. */
	int			last_container; /* For de-duplication of array elements. */
} TrackItem;
/* A hash table entry for distinct-elements counts */
typedef struct
{
	int			count;			/* Count of distinct elements in an array */
	int			frequency;		/* Number of arrays seen with this count */
} DECountItem;
static void compute_array_stats(VacAttrStats *stats,
								AnalyzeAttrFetchFunc fetchfunc,
								int samplerows, double totalrows);
static void prune_element_hashtable(HTAB *elements_tab, int b_current);
static uint32 element_hash(const void *key, Size keysize);
static int	element_match(const void *key1, const void *key2, Size keysize);
static int	element_compare(const void *key1, const void *key2);
static int	trackitem_compare_frequencies_desc(const void *e1, const void *e2,
											   void *arg);
static int	trackitem_compare_element(const void *e1, const void *e2,
									  void *arg);
static int	countitem_compare_count(const void *e1, const void *e2,
									void *arg);
/*
 * array_typanalyze -- typanalyze function for array columns
 */
Datum
array_typanalyze(PG_FUNCTION_ARGS)
{
	VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
	Oid			element_typeid;
	TypeCacheEntry *typentry;
	ArrayAnalyzeExtraData *extra_data;

	/*
	 * Call the standard typanalyze function.  It may fail to find needed
	 * operators, in which case we also can't do anything, so just fail.
	 */
	if (!std_typanalyze(stats))
		PG_RETURN_BOOL(false);
	/*
	 * Check attribute data type is a varlena array (or a domain over one).
	 */
	element_typeid = get_base_element_type(stats->attrtypid);
	if (!OidIsValid(element_typeid))
		elog(ERROR, "array_typanalyze was invoked for non-array type %u",
			 stats->attrtypid);
	/*
	 * Gather information about the element type.  If we fail to find
	 * something, return leaving the state from std_typanalyze() in place.
	 */
	typentry = lookup_type_cache(element_typeid,
								 TYPECACHE_EQ_OPR |
								 TYPECACHE_CMP_PROC_FINFO |
								 TYPECACHE_HASH_PROC_FINFO);

	if (!OidIsValid(typentry->eq_opr) ||
		!OidIsValid(typentry->cmp_proc_finfo.fn_oid) ||
		!OidIsValid(typentry->hash_proc_finfo.fn_oid))
		PG_RETURN_BOOL(true);
	/* Store our findings for use by compute_array_stats() */
	extra_data = (ArrayAnalyzeExtraData *) palloc(sizeof(ArrayAnalyzeExtraData));
	extra_data->type_id = typentry->type_id;
	extra_data->eq_opr = typentry->eq_opr;
	extra_data->coll_id = stats->attrcollid;	/* collation we should use */
	extra_data->typbyval = typentry->typbyval;
	extra_data->typlen = typentry->typlen;
	extra_data->typalign = typentry->typalign;
	extra_data->cmp = &typentry->cmp_proc_finfo;
	extra_data->hash = &typentry->hash_proc_finfo;

	/* Save old compute_stats and extra_data for scalar statistics ... */
	extra_data->std_compute_stats = stats->compute_stats;
	extra_data->std_extra_data = stats->extra_data;

	/* ... and replace with our info */
	stats->compute_stats = compute_array_stats;
	stats->extra_data = extra_data;

	/*
	 * Note we leave stats->minrows set as std_typanalyze set it.  Should it
	 * be increased for array analysis purposes?
	 */

	PG_RETURN_BOOL(true);
}
/*
 * compute_array_stats() -- compute statistics for an array column
 *
 * This function computes statistics useful for determining selectivity of
 * the array operators <@, &&, and @>.  It is invoked by ANALYZE via the
 * compute_stats hook after sample rows have been collected.
 *
 * We also invoke the standard compute_stats function, which will compute
 * "scalar" statistics relevant to the btree-style array comparison operators.
 * However, exact duplicates of an entire array may be rare despite many
 * arrays sharing individual elements.  This especially afflicts long arrays,
 * which are also liable to lack all scalar statistics due to the low
 * WIDTH_THRESHOLD used in analyze.c.  So, in addition to the standard stats,
 * we find the most common array elements and compute a histogram of distinct
 * element counts.
 *
 * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 * frequency counts over data streams" by G. S. Manku and R. Motwani, in
 * Proceedings of the 28th International Conference on Very Large Data Bases,
 * Hong Kong, China, August 2002, section 4.2.  The paper is available at
 * http://www.vldb.org/conf/2002/S10P03.pdf
 *
 * The Lossy Counting (aka LC) algorithm goes like this:
 * Let s be the threshold frequency for an item (the minimum frequency we
 * are interested in) and epsilon the error margin for the frequency.  Let D
 * be a set of triples (e, f, delta), where e is an element value, f is that
 * element's frequency (actually, its current occurrence count) and delta is
 * the maximum error in f.  We start with D empty and process the elements in
 * batches of size w.  (The batch size is also known as "bucket size" and is
 * equal to 1/epsilon.)  Let the current batch number be b_current, starting
 * with 1.  For each element e we either increment its f count, if it's
 * already in D, or insert a new triple into D with values (e, 1, b_current
 * - 1).  After processing each batch we prune D, by removing from it all
 * elements with f + delta <= b_current.  After the algorithm finishes we
 * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 * where N is the total number of elements in the input.  We emit the
 * remaining elements with estimated frequency f/N.  The LC paper proves
 * that this algorithm finds all elements with true frequency at least s,
 * and that no frequency is overestimated or is underestimated by more than
 * epsilon.  Furthermore, given reasonable assumptions about the input
 * distribution, the required table size is no more than about 7 times w.
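 *
 * A small worked example of the pruning rule (ours, not the paper's):
 * with epsilon = 0.1 the bucket size is w = 10.  While processing the
 * first bucket, a new element is inserted as (e, 1, 0).  When that bucket
 * ends (b_current = 1) we delete every triple with f + delta <= 1, i.e.
 * any element seen only once so far; an element seen twice survives as
 * (e, 2, 0).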
 *
 * In the absence of a principled basis for other particular values, we
 * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
 * But we leave out the correction for stopwords, which do not apply to
 * arrays.  These parameters give bucket width w = K/0.007 and maximum
 * expected hashtable size of about 1000 * K.
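 *
 * For illustration (our arithmetic, using the formulas above): with the
 * default statistics target of 100, K = num_mcelem = 1000, so the bucket
 * width works out to 1000 * 1000 / 7 =~ 142857 elements and the expected
 * hashtable size is about 7 * w =~ 1000000 entries.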
 *
 * Elements may repeat within an array.  Since duplicates do not change the
 * behavior of <@, && or @>, we want to count each element only once per
 * array.  Therefore, we store in the finished pg_statistic entry each
 * element's frequency as the fraction of all non-null rows that contain it.
 * We divide the raw counts by nonnull_cnt to get those figures.
 */
static void
compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
					int samplerows, double totalrows)
{
	ArrayAnalyzeExtraData *extra_data;
	int			num_mcelem;
	int			null_elem_cnt = 0;
	int			analyzed_rows = 0;

	/* This is D from the LC algorithm. */
	HTAB	   *elements_tab;
	HASHCTL		elem_hash_ctl;
	HASH_SEQ_STATUS scan_status;

	/* This is the current bucket number from the LC algorithm */
	int			b_current;

	/* This is 'w' from the LC algorithm */
	int			bucket_width;
	int			array_no;
	int64		element_no;
	TrackItem  *item;
	int			slot_idx;
	HTAB	   *count_tab;
	HASHCTL		count_hash_ctl;
	DECountItem *count_item;
	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;

	/*
	 * Invoke analyze.c's standard analysis function to create scalar-style
	 * stats for the column.  It will expect its own extra_data pointer, so
	 * temporarily install that.
	 */
	stats->extra_data = extra_data->std_extra_data;
	extra_data->std_compute_stats(stats, fetchfunc, samplerows, totalrows);
	stats->extra_data = extra_data;

	/*
	 * Set up static pointer for use by subroutines.  We wait till here in
	 * case std_compute_stats somehow recursively invokes us (probably not
	 * possible, but ...)
	 */
	array_extra_data = extra_data;
	/*
	 * We want statistics_target * 10 elements in the MCELEM array.  This
	 * multiplier is pretty arbitrary, but is meant to reflect the fact that
	 * the number of individual elements tracked in pg_statistic ought to be
	 * more than the number of values for a simple scalar column.
	 */
	num_mcelem = stats->attstattarget * 10;

	/*
	 * We set bucket width equal to num_mcelem / 0.007 as per the comment
	 * above.
	 */
	bucket_width = num_mcelem * 1000 / 7;
	/*
	 * Create the hashtable.  It will be in local memory, so we don't need to
	 * worry about overflowing the initial size.  Also we don't need to pay
	 * any attention to locking and memory management.
	 */
	elem_hash_ctl.keysize = sizeof(Datum);
	elem_hash_ctl.entrysize = sizeof(TrackItem);
	elem_hash_ctl.hash = element_hash;
	elem_hash_ctl.match = element_match;
	elem_hash_ctl.hcxt = CurrentMemoryContext;
	elements_tab = hash_create("Analyzed elements table",
							   num_mcelem,
							   &elem_hash_ctl,
							   HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

	/* hashtable for array distinct elements counts */
	count_hash_ctl.keysize = sizeof(int);
	count_hash_ctl.entrysize = sizeof(DECountItem);
	count_hash_ctl.hcxt = CurrentMemoryContext;
	count_tab = hash_create("Array distinct element count table",
							64,
							&count_hash_ctl,
							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
	/* Initialize counters. */
	b_current = 1;
	element_no = 0;
	slot_idx = 0;

	/* Loop over the arrays. */
	for (array_no = 0; array_no < samplerows; array_no++)
	{
		Datum		value;
		bool		isnull;
		ArrayType  *array;
		int			num_elems;
		Datum	   *elem_values;
		bool	   *elem_nulls;
		bool		null_present;
		int			j;
		int64		prev_element_no = element_no;
		int			distinct_count;
		bool		count_item_found;

		vacuum_delay_point();

		value = fetchfunc(stats, array_no, &isnull);
		if (isnull)
		{
			/* ignore arrays that are null overall */
			continue;
		}

		/* Skip too-large values. */
		if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
			continue;
		else
			analyzed_rows++;

		/*
		 * Now detoast the array if needed, and deconstruct into datums.
		 */
		array = DatumGetArrayTypeP(value);

		Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
		deconstruct_array(array,
						  extra_data->type_id,
						  extra_data->typlen,
						  extra_data->typbyval,
						  extra_data->typalign,
						  &elem_values, &elem_nulls, &num_elems);

		/*
		 * We loop through the elements in the array and add them to our
		 * tracking hashtable.
		 */
		null_present = false;
		for (j = 0; j < num_elems; j++)
		{
			Datum		elem_value;
			bool		found;

			/* No null element processing other than flag setting here */
			if (elem_nulls[j])
			{
				null_present = true;
				continue;
			}

			/* Lookup current element in hashtable, adding it if new */
			elem_value = elem_values[j];
			item = (TrackItem *) hash_search(elements_tab,
											 &elem_value,
											 HASH_ENTER, &found);

			if (found)
			{
				/* The element value is already on the tracking list */

				/*
				 * The operators we assist ignore duplicate array elements,
				 * so count a given distinct element only once per array.
				 */
				if (item->last_container == array_no)
					continue;

				item->frequency++;
				item->last_container = array_no;
			}
			else
			{
				/* Initialize new tracking list element */

				/*
				 * If element type is pass-by-reference, we must copy it into
				 * palloc'd space, so that we can release the array below.
				 * (We do this so that the space needed for element values is
				 * limited by the size of the hashtable; if we kept all the
				 * array values around, it could be much more.)
				 */
				item->key = datumCopy(elem_value,
									  extra_data->typbyval,
									  extra_data->typlen);

				item->frequency = 1;
				item->delta = b_current - 1;
				item->last_container = array_no;
			}

			/* element_no is the number of elements processed (ie N) */
			element_no++;

			/* We prune the D structure after processing each bucket */
			if (element_no % bucket_width == 0)
			{
				prune_element_hashtable(elements_tab, b_current);
				b_current++;
			}
		}

		/* Count null element presence once per array. */
		if (null_present)
			null_elem_cnt++;

		/* Update frequency of the particular array distinct element count. */
		distinct_count = (int) (element_no - prev_element_no);
		count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
												 HASH_ENTER,
												 &count_item_found);
		if (count_item_found)
			count_item->frequency++;
		else
			count_item->frequency = 1;

		/* Free memory allocated while detoasting. */
		if (PointerGetDatum(array) != value)
			pfree(array);
		pfree(elem_values);
		pfree(elem_nulls);
	}
	/* Skip pg_statistic slots occupied by standard statistics */
	while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
		slot_idx++;
	if (slot_idx > STATISTIC_NUM_SLOTS - 2)
		elog(ERROR, "insufficient pg_statistic slots for array stats");
	/* We can only compute real stats if we found some non-null values. */
	if (analyzed_rows > 0)
	{
		int			nonnull_cnt = analyzed_rows;
		int			count_items_count;
		int			i;
		TrackItem **sort_table;
		int			track_len;
		int64		cutoff_freq;
		int64		minfreq,
					maxfreq;

		/*
		 * We assume the standard stats code already took care of setting
		 * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
		 * re-compute those values if we wanted to not store the standard
		 * stats.
		 */

		/*
		 * Construct an array of the interesting hashtable items, that is,
		 * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
		 * the minimum and maximum frequencies among these items.
		 *
		 * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
		 * frequency is 9*N / bucket_width.
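		 * (Spelled out: s = 10 * epsilon, so (s - epsilon) * N
		 * = 9 * epsilon * N = 9 * N / bucket_width.)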
		 */
		cutoff_freq = 9 * element_no / bucket_width;

		i = hash_get_num_entries(elements_tab); /* surely enough space */
		sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

		hash_seq_init(&scan_status, elements_tab);
		track_len = 0;
		minfreq = element_no;
		maxfreq = 0;
		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
		{
			if (item->frequency > cutoff_freq)
			{
				sort_table[track_len++] = item;
				minfreq = Min(minfreq, item->frequency);
				maxfreq = Max(maxfreq, item->frequency);
			}
		}
		Assert(track_len <= i);

		/* emit some statistics for debug purposes */
		elog(DEBUG3, "compute_array_stats: target # mces = %d, "
			 "bucket width = %d, "
			 "# elements = " INT64_FORMAT ", hashtable size = %d, "
			 "usable entries = %d",
			 num_mcelem, bucket_width, element_no, i, track_len);
		/*
		 * If we obtained more elements than we really want, get rid of those
		 * with least frequencies.  The easiest way is to qsort the array into
		 * descending frequency order and truncate the array.
		 */
		if (num_mcelem < track_len)
		{
			qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
								trackitem_compare_frequencies_desc, NULL);
			/* reset minfreq to the smallest frequency we're keeping */
			minfreq = sort_table[num_mcelem - 1]->frequency;
		}
		else
			num_mcelem = track_len;
		/* Generate MCELEM slot entry */
		if (num_mcelem > 0)
		{
			MemoryContext old_context;
			Datum	   *mcelem_values;
			float4	   *mcelem_freqs;

			/*
			 * We want to store statistics sorted on the element value using
			 * the element type's default comparison function.  This permits
			 * fast binary searches in selectivity estimation functions.
			 */
			qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
								trackitem_compare_element, NULL);

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);

			/*
			 * We sorted statistics on the element value, but we want to be
			 * able to find the minimal and maximal frequencies without going
			 * through all the values.  We also want the frequency of null
			 * elements.  Store these three values at the end of mcelem_freqs.
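			 *
			 * The resulting layout (our summary of the code below):
			 *	 mcelem_freqs[0 .. num_mcelem - 1]	per-element frequencies
			 *	 mcelem_freqs[num_mcelem]			minimum frequency
			 *	 mcelem_freqs[num_mcelem + 1]		maximum frequency
			 *	 mcelem_freqs[num_mcelem + 2]		frequency of null elements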
			 */
			mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
			mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));

			/*
			 * See comments above about use of nonnull_cnt as the divisor for
			 * the final frequency estimates.
			 */
			for (i = 0; i < num_mcelem; i++)
			{
				TrackItem  *titem = sort_table[i];

				mcelem_values[i] = datumCopy(titem->key,
											 extra_data->typbyval,
											 extra_data->typlen);
				mcelem_freqs[i] = (double) titem->frequency /
					(double) nonnull_cnt;
			}
			mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
			mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;

			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stacoll[slot_idx] = extra_data->coll_id;
			stats->stanumbers[slot_idx] = mcelem_freqs;
			/* See above comment about extra stanumber entries */
			stats->numnumbers[slot_idx] = num_mcelem + 3;
			stats->stavalues[slot_idx] = mcelem_values;
			stats->numvalues[slot_idx] = num_mcelem;
			/* We are storing values of element type */
			stats->statypid[slot_idx] = extra_data->type_id;
			stats->statyplen[slot_idx] = extra_data->typlen;
			stats->statypbyval[slot_idx] = extra_data->typbyval;
			stats->statypalign[slot_idx] = extra_data->typalign;
			slot_idx++;
		}
		/* Generate DECHIST slot entry */
		count_items_count = hash_get_num_entries(count_tab);
		if (count_items_count > 0)
		{
			int			num_hist = stats->attstattarget;
			DECountItem **sorted_count_items;
			int			j;
			int			delta;
			int64		frac;
			float4	   *hist;

			/* num_hist must be at least 2 for the loop below to work */
			num_hist = Max(num_hist, 2);

			/*
			 * Create an array of DECountItem pointers, and sort them into
			 * increasing count order.
			 */
			sorted_count_items = (DECountItem **)
				palloc(sizeof(DECountItem *) * count_items_count);
			hash_seq_init(&scan_status, count_tab);
			j = 0;
			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
				sorted_count_items[j++] = count_item;
			qsort_interruptible(sorted_count_items, count_items_count,
								sizeof(DECountItem *),
								countitem_compare_count, NULL);
			/*
			 * Prepare to fill stanumbers with the histogram, followed by the
			 * average count.  This array must be stored in anl_context.
			 */
			hist = (float4 *)
				MemoryContextAlloc(stats->anl_context,
								   sizeof(float4) * (num_hist + 1));
			hist[num_hist] = (double) element_no / (double) nonnull_cnt;
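			/*
			 * Thus (our summary): hist[0 .. num_hist - 1] will hold the
			 * histogram of distinct-element counts, and hist[num_hist]
			 * holds the average number of distinct non-null elements per
			 * non-null array.
			 */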
			/*
			 * Construct the histogram of distinct-element counts (DECs).
			 *
			 * The object of this loop is to copy the min and max DECs to
			 * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
			 * in between (where "evenly-spaced" is with reference to the
			 * whole input population of arrays).  If we had a complete sorted
			 * array of DECs, one per analyzed row, the i'th hist value would
			 * come from DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
			 * (compare the histogram-making loop in compute_scalar_stats()).
			 * But instead of that we have the sorted_count_items[] array,
			 * which holds unique DEC values with their frequencies (that is,
			 * a run-length-compressed version of the full array).  So we
			 * control advancing through sorted_count_items[] with the
			 * variable "frac", which is defined as (x - y) * (num_hist - 1),
			 * where x is the index in the notional DECs array corresponding
			 * to the start of the next sorted_count_items[] element's run,
			 * and y is the index in DECs from which we should take the next
			 * histogram value.  We have to advance whenever x <= y, that is
			 * frac <= 0.  The x component is the sum of the frequencies seen
			 * so far (up through the current sorted_count_items[] element),
			 * and of course y * (num_hist - 1) = i * (analyzed_rows - 1),
			 * per the subscript calculation above.  (The subscript calculation
			 * implies dropping any fractional part of y; in this formulation
			 * that's handled by not advancing until frac reaches 1.)
			 *
			 * Even though frac has a bounded range, it could overflow int32
			 * when working with very large statistics targets, so we do that
			 * math in int64.
			 */
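			/*
			 * A concrete trace (ours, for illustration): suppose
			 * analyzed_rows = 5 with sorted DECs {2, 2, 3, 3, 3} and
			 * num_hist = 3, so delta = 4 and the run-length form is
			 * (count 2, freq 2), (count 3, freq 3).  frac starts at
			 * 2 * 2 = 4, so hist[0] = 2 and frac drops to 0; we advance,
			 * frac += 3 * 2, so hist[1] = 3 and frac = 2; finally
			 * hist[2] = 3.  That matches DECs[0], DECs[2], DECs[4].
			 */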
			delta = analyzed_rows - 1;
			j = 0;				/* current index in sorted_count_items */
			/* Initialize frac for sorted_count_items[0]; y is initially 0 */
			frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
			for (i = 0; i < num_hist; i++)
			{
				while (frac <= 0)
				{
					/* Advance, and update x component of frac */
					j++;
					frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
				}
				hist[i] = sorted_count_items[j]->count;
				frac -= delta;	/* update y for upcoming i increment */
			}
			Assert(j == count_items_count - 1);

			stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
			stats->staop[slot_idx] = extra_data->eq_opr;
			stats->stacoll[slot_idx] = extra_data->coll_id;
			stats->stanumbers[slot_idx] = hist;
			stats->numnumbers[slot_idx] = num_hist + 1;
			slot_idx++;
		}
	}
	/*
	 * We don't need to bother cleaning up any of our temporary palloc's.  The
	 * hashtable should also go away, as it used a child memory context.
	 */
}
/*
 * A function to prune the D structure from the Lossy Counting algorithm.
 * Consult compute_tsvector_stats() for a wider explanation.
 */
static void
prune_element_hashtable(HTAB *elements_tab, int b_current)
{
	HASH_SEQ_STATUS scan_status;
	TrackItem  *item;

	hash_seq_init(&scan_status, elements_tab);
	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
	{
		if (item->frequency + item->delta <= b_current)
		{
			Datum		value = item->key;

			if (hash_search(elements_tab, &item->key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "hash table corrupted");
			/* We should free memory if element is not passed by value */
			if (!array_extra_data->typbyval)
				pfree(DatumGetPointer(value));
		}
	}
}
/*
 * Hash function for elements.
 *
 * We use the element type's default hash opclass, and the column collation
 * if the type is collation-sensitive.
 */
static uint32
element_hash(const void *key, Size keysize)
{
	Datum		d = *((const Datum *) key);
	Datum		h;

	h = FunctionCall1Coll(array_extra_data->hash,
						  array_extra_data->coll_id,
						  d);
	return DatumGetUInt32(h);
}
/*
 * Matching function for elements, to be used in hashtable lookups.
 */
static int
element_match(const void *key1, const void *key2, Size keysize)
{
	/* The keysize parameter is superfluous here */
	return element_compare(key1, key2);
}
/*
 * Comparison function for elements.
 *
 * We use the element type's default btree opclass, and the column collation
 * if the type is collation-sensitive.
 *
 * XXX consider using SortSupport infrastructure
 */
static int
element_compare(const void *key1, const void *key2)
{
	Datum		d1 = *((const Datum *) key1);
	Datum		d2 = *((const Datum *) key2);
	Datum		c;

	c = FunctionCall2Coll(array_extra_data->cmp,
						  array_extra_data->coll_id,
						  d1, d2);
	return DatumGetInt32(c);
}
/*
 * Comparator for sorting TrackItems by frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
{
	const TrackItem *const *t1 = (const TrackItem *const *) e1;
	const TrackItem *const *t2 = (const TrackItem *const *) e2;

	return (*t2)->frequency - (*t1)->frequency;
}
/*
 * Comparator for sorting TrackItems by element values
 */
static int
trackitem_compare_element(const void *e1, const void *e2, void *arg)
{
	const TrackItem *const *t1 = (const TrackItem *const *) e1;
	const TrackItem *const *t2 = (const TrackItem *const *) e2;

	return element_compare(&(*t1)->key, &(*t2)->key);
}
/*
 * Comparator for sorting DECountItems by count
 */
static int
countitem_compare_count(const void *e1, const void *e2, void *arg)
{
	const DECountItem *const *t1 = (const DECountItem *const *) e1;
	const DECountItem *const *t2 = (const DECountItem *const *) e2;

	if ((*t1)->count < (*t2)->count)
		return -1;
	else if ((*t1)->count == (*t2)->count)