/*-------------------------------------------------------------------------
 *
 * typcache.c
 *	  POSTGRES type cache code
 *
 * The type cache exists to speed lookup of certain information about data
 * types that is not directly available from a type's pg_type row.  In
 * particular, we use a type's default btree opclass, or the default hash
 * opclass if no btree opclass exists, to determine which operators should
 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
 *
 * Several seemingly-odd choices have been made to support use of the type
 * cache by the generic array comparison routines array_eq() and array_cmp().
 * Because these routines are used as index support operations, they cannot
 * leak memory.  To allow them to execute efficiently, all information that
 * either of them would like to re-use across calls is made available in the
 * type cache.
 *
 * Once created, a type cache entry lives as long as the backend does, so
 * there is no need for a call to release a cache entry.  (For present uses,
 * it would be okay to flush type cache entries at the ends of transactions,
 * if we needed to reclaim space.)
 *
 * There is presently no provision for clearing out a cache entry if the
 * stored data becomes obsolete.  (The code will work if a type acquires
 * opclasses it didn't have before while a backend runs --- but not if the
 * definition of an existing opclass is altered.)  However, the relcache
 * doesn't cope with opclasses changing under it, either, so this seems
 * a low-priority problem.
 *
 * We do support clearing the tuple descriptor part of a rowtype's cache
 * entry, since that may need to change as a consequence of ALTER TABLE.
 *
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "access/heapam.h"
#include "access/nbtree.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/typcache.h"
/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;
/*
 * We use a separate table for storing the definitions of non-anonymous
 * record types.  Once defined, a record type will be remembered for the
 * life of the backend.  Subsequent uses of the "same" record type (where
 * sameness means equalTupleDescs) will refer to the existing table entry.
 *
 * Stored record types are remembered in a linear array of TupleDescs,
 * which can be indexed quickly with the assigned typmod.  There is also
 * a hash table to speed searches for matching TupleDescs.  The hash key
 * uses just the first N columns' type OIDs, and so we may have multiple
 * entries with the same hash key.
 */
#define REC_HASH_KEYS	16		/* use this many columns in hash key */

typedef struct RecordCacheEntry
{
	/* the hash lookup key MUST BE FIRST */
	Oid			hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */

	/* list of TupleDescs for record types with this hashkey */
	List	   *tupdescs;
} RecordCacheEntry;

static HTAB *RecordCacheHash = NULL;

static TupleDesc *RecordCacheArray = NULL;
static int32 RecordCacheArrayLen = 0;	/* allocated length of array */
static int32 NextRecordTypmod = 0;		/* number of entries used */
/*
 * lookup_type_cache
 *
 * Fetch the type cache entry for the specified datatype, and make sure that
 * all the fields requested by bits in 'flags' are valid.
 *
 * The result is never NULL --- we will elog() if the passed type OID is
 * invalid.  Note however that we may fail to find one or more of the
 * requested opclass-dependent fields; the caller needs to check whether
 * the fields are InvalidOid or not.
 */
TypeCacheEntry *
lookup_type_cache(Oid type_id, int flags)
{
	TypeCacheEntry *typentry;
	bool		found;
	if (TypeCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		if (!CacheMemoryContext)
			CreateCacheMemoryContext();

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(TypeCacheEntry);
		ctl.hash = oid_hash;
		TypeCacheHash = hash_create("Type information cache", 64,
									&ctl, HASH_ELEM | HASH_FUNCTION);
	}
	/* Try to look up an existing entry */
	typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
											  (void *) &type_id,
											  HASH_FIND, NULL);
	if (typentry == NULL)
	{
		/*
		 * If we didn't find one, we want to make one.  But first look up the
		 * pg_type row, just to make sure we don't make a cache entry for an
		 * invalid type OID.
		 */
		HeapTuple	tp;
		Form_pg_type typtup;

		tp = SearchSysCache(TYPEOID,
							ObjectIdGetDatum(type_id),
							0, 0, 0);
		if (!HeapTupleIsValid(tp))
			elog(ERROR, "cache lookup failed for type %u", type_id);
		typtup = (Form_pg_type) GETSTRUCT(tp);
		if (!typtup->typisdefined)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type \"%s\" is only a shell",
							NameStr(typtup->typname))));
		/* Now make the typcache entry */
		typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
												  (void *) &type_id,
												  HASH_ENTER, &found);
		Assert(!found);			/* it wasn't there a moment ago */

		MemSet(typentry, 0, sizeof(TypeCacheEntry));
		typentry->type_id = type_id;
		typentry->typlen = typtup->typlen;
		typentry->typbyval = typtup->typbyval;
		typentry->typalign = typtup->typalign;
		typentry->typtype = typtup->typtype;
		typentry->typrelid = typtup->typrelid;

		ReleaseSysCache(tp);
	}
	/* If we haven't already found the opclass, try to do so */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
				  TYPECACHE_CMP_PROC |
				  TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
				  TYPECACHE_BTREE_OPFAMILY)) &&
		typentry->btree_opf == InvalidOid)
	{
		Oid			opclass;

		opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
		if (OidIsValid(opclass))
		{
			typentry->btree_opf = get_opclass_family(opclass);
			typentry->btree_opintype = get_opclass_input_type(opclass);
		}
		/* Only care about hash opclass if no btree opclass... */
		if (typentry->btree_opf == InvalidOid)
		{
			if (typentry->hash_opf == InvalidOid)
			{
				opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
				if (OidIsValid(opclass))
				{
					typentry->hash_opf = get_opclass_family(opclass);
					typentry->hash_opintype = get_opclass_input_type(opclass);
				}
			}
		}
		else
		{
			/*
			 * If we find a btree opclass where previously we only found a
			 * hash opclass, forget the hash equality operator so we can use
			 * the btree operator instead.
			 */
			typentry->eq_opr = InvalidOid;
			typentry->eq_opr_finfo.fn_oid = InvalidOid;
		}
	}
	/* Look for requested operators and functions */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
		typentry->eq_opr == InvalidOid)
	{
		if (typentry->btree_opf != InvalidOid)
			typentry->eq_opr = get_opfamily_member(typentry->btree_opf,
												   typentry->btree_opintype,
												   typentry->btree_opintype,
												   BTEqualStrategyNumber);
		if (typentry->eq_opr == InvalidOid &&
			typentry->hash_opf != InvalidOid)
			typentry->eq_opr = get_opfamily_member(typentry->hash_opf,
												   typentry->hash_opintype,
												   typentry->hash_opintype,
												   HTEqualStrategyNumber);
	}
	if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
	{
		if (typentry->btree_opf != InvalidOid)
			typentry->lt_opr = get_opfamily_member(typentry->btree_opf,
												   typentry->btree_opintype,
												   typentry->btree_opintype,
												   BTLessStrategyNumber);
	}
	if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
	{
		if (typentry->btree_opf != InvalidOid)
			typentry->gt_opr = get_opfamily_member(typentry->btree_opf,
												   typentry->btree_opintype,
												   typentry->btree_opintype,
												   BTGreaterStrategyNumber);
	}
	if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
		typentry->cmp_proc == InvalidOid)
	{
		if (typentry->btree_opf != InvalidOid)
			typentry->cmp_proc = get_opfamily_proc(typentry->btree_opf,
												   typentry->btree_opintype,
												   typentry->btree_opintype,
												   BTORDER_PROC);
	}
	/*
	 * Set up fmgr lookup info as requested
	 *
	 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
	 * which is not quite right (they're really in DynaHashContext) but this
	 * will do for our purposes.
	 */
	if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
		typentry->eq_opr_finfo.fn_oid == InvalidOid &&
		typentry->eq_opr != InvalidOid)
	{
		Oid			eq_opr_func;

		eq_opr_func = get_opcode(typentry->eq_opr);
		if (eq_opr_func != InvalidOid)
			fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
						  CacheMemoryContext);
	}
	if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
		typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
		typentry->cmp_proc != InvalidOid)
	{
		fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
					  CacheMemoryContext);
	}
	/*
	 * If it's a composite type (row type), get tupdesc if requested
	 */
	if ((flags & TYPECACHE_TUPDESC) &&
		typentry->tupDesc == NULL &&
		typentry->typtype == TYPTYPE_COMPOSITE)
	{
		Relation	rel;

		if (!OidIsValid(typentry->typrelid))	/* should not happen */
			elog(ERROR, "invalid typrelid for composite type %u",
				 typentry->type_id);
		rel = relation_open(typentry->typrelid, AccessShareLock);
		Assert(rel->rd_rel->reltype == typentry->type_id);

		/*
		 * Link to the tupdesc and increment its refcount (we assert it's a
		 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for
		 * this, because the reference mustn't be entered in the current
		 * resource owner; it can outlive the current query.
		 */
		typentry->tupDesc = RelationGetDescr(rel);

		Assert(typentry->tupDesc->tdrefcount > 0);
		typentry->tupDesc->tdrefcount++;

		relation_close(rel, AccessShareLock);
	}

	return typentry;
}
/*
 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
 *
 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
 * hasn't had its refcount bumped.
 */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		return typentry->tupDesc;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod < 0 || typmod >= NextRecordTypmod)
		{
			if (!noError)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("record type has not been registered")));
			return NULL;
		}
		return RecordCacheArray[typmod];
	}
}
/*
 * lookup_rowtype_tupdesc
 *
 * Given a typeid/typmod that should describe a known composite type,
 * return the tuple descriptor for the type.  Will ereport on failure.
 *
 * Note: on success, we increment the refcount of the returned TupleDesc,
 * and log the reference in CurrentResourceOwner.  Caller should call
 * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
 */
TupleDesc
lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
{
	TupleDesc	tupDesc;

	tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	IncrTupleDescRefCount(tupDesc);
	return tupDesc;
}
/*
 * lookup_rowtype_tupdesc_noerror
 *
 * As above, but if the type is not a known composite type and noError
 * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
 * type_id is passed, you'll get an ereport anyway.)
 */
TupleDesc
lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	if (tupDesc != NULL)
		IncrTupleDescRefCount(tupDesc);
	return tupDesc;
}
/*
 * lookup_rowtype_tupdesc_copy
 *
 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
 * copied into the CurrentMemoryContext and is not reference-counted.
 */
TupleDesc
lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
{
	TupleDesc	tmp;

	tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	return CreateTupleDescCopyConstr(tmp);
}
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
	RecordCacheEntry *recentry;
	TupleDesc	entDesc;
	Oid			hashkey[REC_HASH_KEYS];
	bool		found;
	int			i;
	ListCell   *l;
	int32		newtypmod;
	MemoryContext oldcxt;

	Assert(tupDesc->tdtypeid == RECORDOID);
	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		if (!CacheMemoryContext)
			CreateCacheMemoryContext();

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
		ctl.entrysize = sizeof(RecordCacheEntry);
		ctl.hash = tag_hash;
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl, HASH_ELEM | HASH_FUNCTION);
	}
	/* Find or create a hashtable entry for this hash class */
	MemSet(hashkey, 0, sizeof(hashkey));
	for (i = 0; i < tupDesc->natts; i++)
	{
		if (i >= REC_HASH_KEYS)
			break;
		hashkey[i] = tupDesc->attrs[i]->atttypid;
	}
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												(void *) hashkey,
												HASH_ENTER, &found);
	if (!found)
	{
		/* New entry ... hash_search initialized only the hash key */
		recentry->tupdescs = NIL;
	}
	/* Look for existing record cache entry */
	foreach(l, recentry->tupdescs)
	{
		entDesc = (TupleDesc) lfirst(l);
		if (equalTupleDescs(tupDesc, entDesc))
		{
			tupDesc->tdtypmod = entDesc->tdtypmod;
			return;
		}
	}
	/* Not present, so need to manufacture an entry */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	if (RecordCacheArray == NULL)
	{
		RecordCacheArray = (TupleDesc *) palloc(64 * sizeof(TupleDesc));
		RecordCacheArrayLen = 64;
	}
	else if (NextRecordTypmod >= RecordCacheArrayLen)
	{
		int32		newlen = RecordCacheArrayLen * 2;

		RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
												  newlen * sizeof(TupleDesc));
		RecordCacheArrayLen = newlen;
	}
	/* if fail in subrs, no damage except possibly some wasted memory... */
	entDesc = CreateTupleDescCopy(tupDesc);
	recentry->tupdescs = lcons(entDesc, recentry->tupdescs);
	/* mark it as a reference-counted tupdesc */
	entDesc->tdrefcount = 1;
	/* now it's safe to advance NextRecordTypmod */
	newtypmod = NextRecordTypmod++;
	entDesc->tdtypmod = newtypmod;
	RecordCacheArray[newtypmod] = entDesc;

	/* report to caller as well */
	tupDesc->tdtypmod = newtypmod;

	MemoryContextSwitchTo(oldcxt);
}
/*
 * flush_rowtype_cache
 *
 * If a typcache entry exists for a rowtype, delete the entry's cached
 * tuple descriptor link.  This is called from relcache.c when a cached
 * relation tupdesc is about to be dropped.
 */
void
flush_rowtype_cache(Oid type_id)
{
	TypeCacheEntry *typentry;

	if (TypeCacheHash == NULL)
		return;					/* no table, so certainly no entry */

	typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
											  (void *) &type_id,
											  HASH_FIND, NULL);
	if (typentry == NULL)
		return;					/* no matching entry */
	if (typentry->tupDesc == NULL)
		return;					/* tupdesc hasn't been requested */
	/*
	 * Release our refcount and free the tupdesc if none remain. (Can't use
	 * DecrTupleDescRefCount because this reference is not logged in current
	 * resource owner.)
	 */
	Assert(typentry->tupDesc->tdrefcount > 0);
	if (--typentry->tupDesc->tdrefcount == 0)
		FreeTupleDesc(typentry->tupDesc);

	typentry->tupDesc = NULL;
}