/*-------------------------------------------------------------------------
 *
 * catcache.c
 *	  System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heaptoast.h"
#include "access/relscan.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/pg_prng.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"		/* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner.h"
#include "utils/syscache.h"
/*
 * If a catcache invalidation is processed while we are in the middle of
 * creating a catcache entry (or list), it might apply to the entry we're
 * creating, making it invalid before it's been inserted to the catcache. To
 * catch such cases, we have a stack of "create-in-progress" entries. Cache
 * invalidation marks any matching entries in the stack as dead, in addition
 * to the actual CatCTup and CatCList entries.
 */
typedef struct CatCInProgress
{
	CatCache   *cache;			/* cache that the entry belongs to */
	uint32		hash_value;		/* hash of the entry; ignored for lists */
	bool		list;			/* is it a list entry? */
	bool		dead;			/* set when the entry is invalidated */
	struct CatCInProgress *next;
} CatCInProgress;

static CatCInProgress *catcache_in_progress_stack = NULL;
 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
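
/*
 * Illustrative sketch, not part of the original file; the CATCACHE_EXAMPLES
 * guard is hypothetical and never defined by the build.  Because the bucket
 * count is a power of two, reducing a hash value modulo the table size
 * collapses into a single AND against (size - 1).
 */
#ifdef CATCACHE_EXAMPLES
static Index
example_hash_index(uint32 hashValue)
{
	/* with 8 buckets, only the low three bits select the bucket */
	return HASH_INDEX(hashValue, 8);	/* same as hashValue & 7 */
}
#endif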
/*
 *		variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE_elog(...)				elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
											   int nkeys,
											   Datum v1, Datum v2,
											   Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
												int nkeys,
												uint32 hashValue,
												Index hashIndex,
												Datum v1, Datum v2,
												Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
										   Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
												HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
											const Datum *cachekeys,
											const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void RehashCatCache(CatCache *cp);
static void RehashCatCacheLists(CatCache *cp);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
										Datum *arguments,
										uint32 hashValue, Index hashIndex);

static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *srckeys, Datum *dstkeys);


/*
 *					internal support functions
 */

/* ResourceOwner callbacks to hold catcache references */

static void ResOwnerReleaseCatCache(Datum res);
static char *ResOwnerPrintCatCache(Datum res);
static void ResOwnerReleaseCatCacheList(Datum res);
static char *ResOwnerPrintCatCacheList(Datum res);
static const ResourceOwnerDesc catcache_resowner_desc =
{
	/* catcache references */
	.name = "catcache reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_REFS,
	.ReleaseResource = ResOwnerReleaseCatCache,
	.DebugPrint = ResOwnerPrintCatCache
};

static const ResourceOwnerDesc catlistref_resowner_desc =
{
	/* catcache-list pins */
	.name = "catcache list reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
	.ReleaseResource = ResOwnerReleaseCatCacheList,
	.DebugPrint = ResOwnerPrintCatCacheList
};
/* Convenience wrappers over ResourceOwnerRemember/Forget */
static void
ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static void
ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static void
ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
static void
ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
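
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): the pin/unpin pattern these wrappers support.
 * The owner is enlarged before the refcount is bumped, so remembering the
 * reference afterwards cannot fail and strand the count.
 */
#ifdef CATCACHE_EXAMPLES
static void
example_pin_and_unpin(CatCTup *ct)
{
	/* pin: enlarge first, then bump, then remember */
	ResourceOwnerEnlarge(CurrentResourceOwner);
	ct->refcount++;
	ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

	/* unpin: forget, then decrement */
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
	ct->refcount--;
}
#endif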
/*
 * Hash and equality functions for system types that are used as cache key
 * fields.  In some cases, we just call the regular SQL-callable functions for
 * the appropriate data type, but that tends to be a little slow, and the
 * speed of these functions is performance-critical.  Therefore, for data
 * types that frequently occur as catcache keys, we hard-code the logic here.
 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
 */
static bool
chareqfast(Datum a, Datum b)
{
	return DatumGetChar(a) == DatumGetChar(b);
}

static uint32
charhashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetChar(datum));
}

static bool
nameeqfast(Datum a, Datum b)
{
	char	   *ca = NameStr(*DatumGetName(a));
	char	   *cb = NameStr(*DatumGetName(b));

	return strncmp(ca, cb, NAMEDATALEN) == 0;
}

static uint32
namehashfast(Datum datum)
{
	char	   *key = NameStr(*DatumGetName(datum));

	return hash_any((unsigned char *) key, strlen(key));
}

static bool
int2eqfast(Datum a, Datum b)
{
	return DatumGetInt16(a) == DatumGetInt16(b);
}

static uint32
int2hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt16(datum));
}

static bool
int4eqfast(Datum a, Datum b)
{
	return DatumGetInt32(a) == DatumGetInt32(b);
}

static uint32
int4hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt32(datum));
}

static bool
texteqfast(Datum a, Datum b)
{
	/*
	 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here.  We just
	 * want to take the fast "deterministic" path in texteq().
	 */
	return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
}

static uint32
texthashfast(Datum datum)
{
	/* analogously here as in texteqfast() */
	return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
}

static bool
oidvectoreqfast(Datum a, Datum b)
{
	return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
}

static uint32
oidvectorhashfast(Datum datum)
{
	return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
}
/* Lookup support functions for a type. */
static void
GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
{
	switch (keytype)
	{
		case BOOLOID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_BOOLEQ;
			break;
		case CHAROID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_CHAREQ;
			break;
		case NAMEOID:
			*hashfunc = namehashfast;
			*fasteqfunc = nameeqfast;
			*eqfunc = F_NAMEEQ;
			break;
		case INT2OID:
			*hashfunc = int2hashfast;
			*fasteqfunc = int2eqfast;
			*eqfunc = F_INT2EQ;
			break;
		case INT4OID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_INT4EQ;
			break;
		case TEXTOID:
			*hashfunc = texthashfast;
			*fasteqfunc = texteqfast;
			*eqfunc = F_TEXTEQ;
			break;
		case OIDOID:
		case REGPROCOID:
		case REGPROCEDUREOID:
		case REGOPEROID:
		case REGOPERATOROID:
		case REGCLASSOID:
		case REGTYPEOID:
		case REGCOLLATIONOID:
		case REGCONFIGOID:
		case REGDICTIONARYOID:
		case REGROLEOID:
		case REGNAMESPACEOID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_OIDEQ;
			break;
		case OIDVECTOROID:
			*hashfunc = oidvectorhashfast;
			*fasteqfunc = oidvectoreqfast;
			*eqfunc = F_OIDVECTOREQ;
			break;
		default:
			elog(FATAL, "type %u not supported as catcache key", keytype);
			*hashfunc = NULL;	/* keep compiler quiet */

			*eqfunc = InvalidOid;
			break;
	}
}
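
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): how CatalogCacheInitializeCache() below consumes
 * this lookup for an OID key column.
 */
#ifdef CATCACHE_EXAMPLES
static void
example_get_oid_key_funcs(void)
{
	CCHashFN	hashfunc;
	RegProcedure eqfunc;
	CCFastEqualFN fasteqfunc;

	GetCCHashEqFuncs(OIDOID, &hashfunc, &eqfunc, &fasteqfunc);
	/* OID keys reuse the int4 fast paths, with F_OIDEQ as the SQL equal op */
	Assert(hashfunc == int4hashfast && fasteqfunc == int4eqfast);
}
#endif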
/*
 *		CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
							 Datum v1, Datum v2, Datum v3, Datum v4)
{
	uint32		hashValue = 0;
	uint32		oneHash;
	CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

	CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
			   cache->cc_relname, nkeys, cache);

	switch (nkeys)
	{
		case 4:
			oneHash = (cc_hashfunc[3]) (v4);
			hashValue ^= pg_rotate_left32(oneHash, 24);
			/* FALLTHROUGH */
		case 3:
			oneHash = (cc_hashfunc[2]) (v3);
			hashValue ^= pg_rotate_left32(oneHash, 16);
			/* FALLTHROUGH */
		case 2:
			oneHash = (cc_hashfunc[1]) (v2);
			hashValue ^= pg_rotate_left32(oneHash, 8);
			/* FALLTHROUGH */
		case 1:
			oneHash = (cc_hashfunc[0]) (v1);
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
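
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): for a two-key cache the combination above
 * reduces to rotating the second key's hash by 8 bits and XORing it with
 * the first key's hash, so equal values in different key positions do not
 * cancel each other out.
 */
#ifdef CATCACHE_EXAMPLES
static uint32
example_two_key_hash(CatCache *cache, Datum v1, Datum v2)
{
	uint32		h1 = (cache->cc_hashfunc[0]) (v1);
	uint32		h2 = (cache->cc_hashfunc[1]) (v2);

	return pg_rotate_left32(h2, 8) ^ h1;
}
#endif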
/*
 *		CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
	Datum		v1 = 0,
				v2 = 0,
				v3 = 0,
				v4 = 0;
	bool		isNull = false;
	int		   *cc_keyno = cache->cc_keyno;
	TupleDesc	cc_tupdesc = cache->cc_tupdesc;

	/* Now extract key fields from tuple, insert into scankey */
	switch (nkeys)
	{
		case 4:
			v4 = fastgetattr(tuple,
							 cc_keyno[3],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			v3 = fastgetattr(tuple,
							 cc_keyno[2],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			v2 = fastgetattr(tuple,
							 cc_keyno[1],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			v1 = fastgetattr(tuple,
							 cc_keyno[0],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}
/*
 *		CatalogCacheCompareTuple
 *
 * Compare a tuple to the passed arguments.
 */
static inline bool
CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
						 const Datum *cachekeys,
						 const Datum *searchkeys)
{
	const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
			return false;
	}
	return true;
}
#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_nlists = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_nlist,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_nlists += cache->cc_nlist;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_nlists,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
/*
 *		CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/*
	 * Free keys when we're dealing with a negative entry, normal entries
	 * just point into tuple, allocated together with the CatCTup.
	 */
	if (ct->negative)
		CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
						 cache->cc_keyno, ct->keys);

	pfree(ct);

	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}
/*
 *		CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated column data */
	CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
					 cache->cc_keyno, cl->keys);

	pfree(cl);

	--cache->cc_nlist;
}
/*
 *	CatCacheInvalidate
 *
 *	Invalidate entries in the specified cache, given a hash value.
 *
 *	We delete cache entries that match the hash value, whether positive
 *	or negative.  We don't care whether the invalidation is the result
 *	of a tuple insertion or a deletion.
 *
 *	We used to try to match positive cache entries by TID, but that is
 *	unsafe after a VACUUM FULL on a system catalog: an inval event could
 *	be queued before VACUUM FULL, and then processed afterwards, when the
 *	target tuple that has to be invalidated has a different TID than it
 *	did when the event was created.  So now we just compare hash values and
 *	accept the small risk of unnecessary invalidations due to false matches.
 *
 *	This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	for (int i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}

	/* Also invalidate any entries that are being built */
	for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
	{
		if (e->cache == cache)
		{
			if (e->list || e->hash_value == hashValue)
				e->dead = true;
		}
	}
}
/* ----------------------------------------------------------------
 *					   public functions
 * ----------------------------------------------------------------
 */

/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
	/*
	 * Purely for paranoia, check that context doesn't exist; caller probably
	 * did so already.
	 */
	if (!CacheMemoryContext)
		CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
												   "CacheMemoryContext",
												   ALLOCSET_DEFAULT_SIZES);
}
/*
 *		ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 *
 * If 'debug_discard' is true, we are being called as part of
 * debug_discard_caches.  In that case, the cache is not reset for
 * correctness, but just to get more testing of cache invalidation.  We skip
 * resetting in-progress build entries in that case, or we'd never make any
 * progress.
 */
static void
ResetCatalogCache(CatCache *cache, bool debug_discard)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}

	/* Also invalidate any entries that are being built */
	if (!debug_discard)
	{
		for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
		{
			if (e->cache == cache)
				e->dead = true;
		}
	}
}
/*
 *		ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
	ResetCatalogCachesExt(false);
}

void
ResetCatalogCachesExt(bool debug_discard)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "ResetCatalogCaches called");

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		ResetCatalogCache(cache, debug_discard);
	}

	CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}
/*
 *		CatalogCacheFlushCatalog
 *
 *	Flush all catcache entries that came from the specified system catalog.
 *	This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *	tuples very likely now have different TIDs than before.  (At one point
 *	we also tried to force re-execution of CatalogCacheInitializeCache for
 *	the cache(s) on that catalog.  This is a bad idea since it leads to all
 *	kinds of trouble if a cache flush occurs while loading cache entries.
 *	We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *	rather than relying on the relcache to keep a tupdesc for us.  Of course
 *	this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache, false);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
/*
 *		InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the initial number of hash buckets to use in this
	 * catcache.  It will be enlarged later if it becomes too full.
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * Allocate a new cache structure, aligning to a cacheline boundary
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 */
	cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
									 MCXT_ALLOC_ZERO);
	cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

	/*
	 * Many catcaches never receive any list searches.  Therefore, we don't
	 * allocate the cc_lbuckets till we get a list search.
	 */
	cp->cc_lbucket = NULL;

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nlist = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nlbuckets = 0;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
	{
		Assert(AttributeNumberIsValid(key[i]));
		cp->cc_keyno[i] = key[i];
	}

	/*
	 * new cache is initialized as far as we can go for now. print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
/*
 *		RehashCatCache
 *
 * Enlarge a catcache, doubling the number of buckets.
 */
static void
RehashCatCache(CatCache *cp)
{
	dlist_head *newbucket;
	int			newnbuckets;
	int			i;

	elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
		 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);

	/* Allocate a new, larger, hash table. */
	newnbuckets = cp->cc_nbuckets * 2;
	newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));

	/* Move all entries from old hash table to new. */
	for (i = 0; i < cp->cc_nbuckets; i++)
	{
		dlist_mutable_iter iter;

		dlist_foreach_modify(iter, &cp->cc_bucket[i])
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);
			int			hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

			dlist_delete(iter.cur);
			dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
		}
	}

	/* Switch to the new array. */
	pfree(cp->cc_bucket);
	cp->cc_nbuckets = newnbuckets;
	cp->cc_bucket = newbucket;
}
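
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): CatalogCacheCreateEntry() triggers the rehash
 * above once the fill factor passes 2, i.e. once the cache holds more than
 * twice as many tuples as buckets, keeping average chain length bounded.
 */
#ifdef CATCACHE_EXAMPLES
static void
example_maybe_rehash(CatCache *cp)
{
	if (cp->cc_ntup > cp->cc_nbuckets * 2)
		RehashCatCache(cp);
}
#endif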
/*
 *		RehashCatCacheLists
 *
 * Enlarge a catcache's list storage, doubling the number of buckets.
 */
static void
RehashCatCacheLists(CatCache *cp)
{
	dlist_head *newbucket;
	int			newnbuckets;
	int			i;

	elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
		 cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);

	/* Allocate a new, larger, hash table. */
	newnbuckets = cp->cc_nlbuckets * 2;
	newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));

	/* Move all entries from old hash table to new. */
	for (i = 0; i < cp->cc_nlbuckets; i++)
	{
		dlist_mutable_iter iter;

		dlist_foreach_modify(iter, &cp->cc_lbucket[i])
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);
			int			hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);

			dlist_delete(iter.cur);
			dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
		}
	}

	/* Switch to the new array. */
	pfree(cp->cc_lbucket);
	cp->cc_nlbuckets = newnbuckets;
	cp->cc_lbucket = newbucket;
}
/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
	if (cache->cc_keyno[i] > 0) { \
		elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
			 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
			 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
	} else { \
		elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
			 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
	} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the
	 * end of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is
	 * used only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized
	 */
	cache->cc_tupdesc = tupdesc;
}
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
/*
 *		IndexScanOK
 *
 *		This function checks for tuples that will be fetched by
 *		IndexSupportInitialize() during relcache initialization for
 *		certain system indexes that support critical syscaches.
 *		We can't use an indexscan to fetch these, else we'll get into
 *		infinite recursion.  A plain heap scan will work, however.
 *		Once we have completed relcache initialization (signaled by
 *		criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *		Similarly, during backend startup we have to be able to use the
 *		pg_authid, pg_auth_members and pg_database syscaches for
 *		authentication even if we don't yet have relcache entries for those
 *		catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache)
{
	switch (cache->id)
	{
		case INDEXRELID:

			/*
			 * Rather than tracking exactly which indexes have to be loaded
			 * before we can use indexscans (which changes from time to
			 * time), just force all pg_index searches to be heap scans until
			 * we've built the critical relcaches.
			 */
			if (!criticalRelcachesBuilt)
				return false;
			break;

		case AMOID:
		case AMNAME:

			/*
			 * Always do heap scans in pg_am, because it's so small there's
			 * not much point in an indexscan anyway.  We *must* do this when
			 * initially building critical relcache entries, but we might as
			 * well just always do it.
			 */
			return false;

		case AUTHNAME:
		case AUTHOID:
		case AUTHMEMMEMROLE:
		case DATABASEOID:

			/*
			 * Protect authentication lookups occurring before relcache has
			 * collected entries for shared indexes.
			 */
			if (!criticalSharedRelcachesBuilt)
				return false;
			break;

		default:
			break;
	}

	/* Normal case, allow index scan */
	return true;
}
/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.  The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
/*
 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
 * arguments. The compiler can inline the body and unroll loops, making them a
 * bit faster than SearchCatCache().
 */

HeapTuple
SearchCatCache1(CatCache *cache,
				Datum v1)
{
	return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}


HeapTuple
SearchCatCache2(CatCache *cache,
				Datum v1, Datum v2)
{
	return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}


HeapTuple
SearchCatCache3(CatCache *cache,
				Datum v1, Datum v2, Datum v3)
{
	return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}


HeapTuple
SearchCatCache4(CatCache *cache,
				Datum v1, Datum v2, Datum v3, Datum v4)
{
	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
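
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): the lookup/release protocol for these
 * fixed-arity entry points.  syscache.c's SearchSysCache1() wraps the same
 * pattern for callers that know a cache id rather than a CatCache pointer.
 */
#ifdef CATCACHE_EXAMPLES
static bool
example_probe_by_oid(CatCache *cache, Oid objid)
{
	HeapTuple	tup;

	tup = SearchCatCache1(cache, ObjectIdGetDatum(objid));
	if (!HeapTupleIsValid(tup))
		return false;			/* not found (possibly a negative-entry hit) */

	/* ... inspect the tuple here, but do not modify it ... */

	ReleaseCatCache(tup);		/* drop the refcount taken by the search */
	return true;
}
#endif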
/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* Make sure we're in an xact, even if this ends up being a cache hit */
	Assert(IsTransactionState());

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to
		 * be near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it.  If
		 * it's negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	bool		stale;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it
	 * directly from the relation.  If found, we will add it to the cache; if
	 * not found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while
	 * reading the relation --- for example, due to shared-cache-inval
	 * messages being processed during table_open().  This is OK.  It's even
	 * possible for one of those lookups to find and enter the very same
	 * tuple we are trying to fetch here.  If that happens, we will enter a
	 * second copy of the tuple into the cache.  The first copy will never be
	 * referenced again, and will eventually age out of the cache, so there's
	 * no functional problem.  This case is rare enough that it's not worth
	 * expending extra cycles to detect.
	 *
	 * Another case, which we *must* handle, is that the tuple could become
	 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
	 * AcceptInvalidationMessages can run during TOAST table access).  We do
	 * not want to return already-stale catcache entries, so we loop around
	 * and do the table scan again if that happens.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * Ok, need to make a lookup in the relation, copy the scankey and fill
	 * out any per-call fields.
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	do
	{
		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, arguments,
									 hashValue, hashIndex);

		/* Creating a negative cache entry shouldn't fail */
		Assert(ct != NULL);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave
		 * its refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1615 * hold grabbed by a successful SearchCatCache).
1617 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1618 * will be freed as soon as their refcount goes to zero. In combination
1619 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1620 * to catch references to already-released catcache entries.
1623 ReleaseCatCache(HeapTuple tuple
)
1625 ReleaseCatCacheWithOwner(tuple
, CurrentResourceOwner
);
1629 ReleaseCatCacheWithOwner(HeapTuple tuple
, ResourceOwner resowner
)
1631 CatCTup
*ct
= (CatCTup
*) (((char *) tuple
) -
1632 offsetof(CatCTup
, tuple
));
1634 /* Safety checks to ensure we were handed a cache entry */
1635 Assert(ct
->ct_magic
== CT_MAGIC
);
1636 Assert(ct
->refcount
> 0);
1640 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner
, &ct
->tuple
);
1643 #ifndef CATCACHE_FORCE_RELEASE
1646 ct
->refcount
== 0 &&
1647 (ct
->c_list
== NULL
|| ct
->c_list
->refcount
== 0))
1648 CatCacheRemoveCTup(ct
->my_cache
, ct
);
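
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): callers hand back only the HeapTuple pointer,
 * so the code above recovers the owning CatCTup by subtracting the offset
 * of the embedded tuple field, then validates the result via ct_magic.
 */
#ifdef CATCACHE_EXAMPLES
static CatCTup *
example_tuple_to_ctup(HeapTuple tuple)
{
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	Assert(ct->ct_magic == CT_MAGIC);
	return ct;
}
#endif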
/*
 *	GetCatCacheHashValue
 *
 *		Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
					 Datum v1,
					 Datum v2,
					 Datum v3,
					 Datum v4)
{
	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/*
	 * calculate the hash value
	 */
	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
/*
 *	SearchCatCacheList
 *
 *		Generate a list of all tuples matching a partial key (that is,
 *		a key specifying just the first K of the cache's N key columns).
 *
 *		It doesn't make any sense to specify all of the cache's key columns
 *		here: since the key is unique, there could be at most one match, so
 *		you ought to use SearchCatCache() instead.  Hence this function takes
 *		one fewer Datum argument than SearchCatCache() does.
 *
 *		The caller must not modify the list object or the pointed-to tuples,
 *		and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3)
{
	Datum		v4 = 0;			/* dummy last-column value */
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	Index		lHashIndex;
	dlist_iter	iter;
	dlist_head *lbucket;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;
	CatCInProgress *save_in_progress;
	CatCInProgress in_progress_ent;

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * If we haven't previously done a list search in this cache, create the
	 * bucket header array; otherwise, consider whether it's time to enlarge
	 * it.
	 */
	if (cache->cc_lbucket == NULL)
	{
		/* Arbitrary initial size --- must be a power of 2 */
		int			nbuckets = 16;

		cache->cc_lbucket = (dlist_head *)
			MemoryContextAllocZero(CacheMemoryContext,
								   nbuckets * sizeof(dlist_head));
		/* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
		cache->cc_nlbuckets = nbuckets;
	}
	else
	{
		/*
		 * If the hash table has become too full, enlarge the buckets array.
		 * Quite arbitrarily, we enlarge when fill factor > 2.
		 */
		if (cache->cc_nlist > cache->cc_nlbuckets * 2)
			RehashCatCacheLists(cache);
	}

	/*
	 * Find the hash bucket in which to look for the CatCList.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	lbucket = &cache->cc_lbucket[lHashIndex];
	dlist_foreach(iter, lbucket)
	{
		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;

		if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the list
		 * for its hashbucket, so as to speed subsequent searches.  (We do
		 * not move the members to the fronts of their hashbucket lists,
		 * however, since there's no point in that unless they are searched
		 * for individually.)
		 */
		dlist_move_head(lbucket, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlarge(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
				   cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a
	 * PG_TRY block to ensure we can undo those refcounts if we get an error
	 * before we finish constructing the CatCList.  ctlist must be valid
	 * throughout the PG_TRY block.
	 */
	ctlist = NIL;

	/*
	 * Cache invalidation can happen while we're building the list.
	 * CatalogCacheCreateEntry() handles concurrent invalidation of
	 * individual tuples, but it's also possible that a new entry is
	 * concurrently added that should be part of the list we're building.
	 * Register an "in-progress" entry that will receive the invalidation,
	 * until we have built the final list entry.
	 */
	save_in_progress = catcache_in_progress_stack;
	in_progress_ent.next = catcache_in_progress_stack;
	in_progress_ent.cache = cache;
	in_progress_ent.hash_value = lHashValue;
	in_progress_ent.list = true;
	in_progress_ent.dead = false;
	catcache_in_progress_stack = &in_progress_ent;

	PG_TRY();
	{
		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
		Relation	relation;
		SysScanDesc scandesc;
		bool		first_iter = true;

		relation = table_open(cache->cc_reloid, AccessShareLock);

		/*
		 * Ok, need to make a lookup in the relation, copy the scankey and
		 * fill out any per-call fields.
		 */
		memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
		cur_skey[0].sk_argument = v1;
		cur_skey[1].sk_argument = v2;
		cur_skey[2].sk_argument = v3;
		cur_skey[3].sk_argument = v4;

		/*
		 * Scan the table for matching entries.  If an invalidation arrives
		 * mid-build, we will loop back here to retry.
		 */
		do
		{
			/*
			 * If we are retrying, release refcounts on any items created on
			 * the previous iteration.  We dare not try to free them if
			 * they're now unreferenced, since an error while doing that
			 * would result in the PG_CATCH below doing extra refcount
			 * decrements.  Besides, we'll likely re-adopt those items in the
			 * next iteration, so it's not worth complicating matters to try
			 * to get rid of them.
			 */
			foreach(ctlist_item, ctlist)
			{
				ct = (CatCTup *) lfirst(ctlist_item);
				Assert(ct->c_list == NULL);
				Assert(ct->refcount > 0);
				ct->refcount--;
			}
			/* Reset ctlist in preparation for new try */
			ctlist = NIL;
			in_progress_ent.dead = false;

			scandesc = systable_beginscan(relation,
										  cache->cc_indexoid,
										  IndexScanOK(cache),
										  NULL,
										  nkeys,
										  cur_skey);

			/* The list will be ordered iff we are doing an index scan */
			ordered = (scandesc->irel != NULL);

			/* Injection point to help testing the recursive invalidation case */
			if (first_iter)
			{
				INJECTION_POINT("catcache-list-miss-systable-scan-started");
				first_iter = false;
			}

			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
				   !in_progress_ent.dead)
			{
				uint32		hashValue;
				Index		hashIndex;
				bool		found = false;
				dlist_head *bucket;

				/*
				 * See if there's an entry for this tuple already.
				 */
				ct = NULL;
				hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
				hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

				bucket = &cache->cc_bucket[hashIndex];
				dlist_foreach(iter, bucket)
				{
					ct = dlist_container(CatCTup, cache_elem, iter.cur);

					if (ct->dead || ct->negative)
						continue;	/* ignore dead and negative entries */

					if (ct->hash_value != hashValue)
						continue;	/* quickly skip entry if wrong hash val */

					if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
						continue;	/* not same tuple */

					/*
					 * Found a match, but can't use it if it belongs to
					 * another list already
					 */
					if (ct->c_list)
						continue;

					found = true;
					break;		/* A-OK */
				}

				if (!found)
				{
					/* We didn't find a usable entry, so make a new one */
					ct = CatalogCacheCreateEntry(cache, ntp, NULL,
												 hashValue, hashIndex);

					/* upon failure, we must start the scan over */
					if (ct == NULL)
					{
						in_progress_ent.dead = true;
						break;
					}
				}

				/* Careful here: add entry to ctlist, then bump its refcount */
				/* This way leaves state correct if lappend runs out of memory */
				ctlist = lappend(ctlist, ct);
				ct->refcount++;
			}

			systable_endscan(scandesc);
		} while (in_progress_ent.dead);

		table_close(relation, AccessShareLock);

		/* Make sure the resource owner has room to remember this entry. */
		ResourceOwnerEnlarge(CurrentResourceOwner);

		/* Now we can build the CatCList entry. */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

		/* Extract key values */
		CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
						 arguments, cl->keys);
		MemoryContextSwitchTo(oldcxt);

		/*
		 * We are now past the last thing that could trigger an elog before
		 * we have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		Assert(catcache_in_progress_stack == &in_progress_ent);
		catcache_in_progress_stack = save_in_progress;

		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();
	Assert(catcache_in_progress_stack == &in_progress_ent);
	catcache_in_progress_stack = save_in_progress;

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	/*
	 * Add the CatCList to the appropriate bucket, and count it.
	 */
	dlist_push_head(lbucket, &cl->cache_elem);
	cache->cc_nlist++;

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
			   cache->cc_relname, nmembers);

	return cl;
}
/*
 *	ReleaseCatCacheList
 *
 *	Decrement the reference count of a catcache list.
 */
void
ReleaseCatCacheList(CatCList *list)
{
	ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
}

static void
ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
{
	/* Safety checks to ensure we were handed a cache entry */
	Assert(list->cl_magic == CL_MAGIC);
	Assert(list->refcount > 0);
	list->refcount--;
	if (resowner)
		ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

	if (
#ifndef CATCACHE_FORCE_RELEASE
		list->dead &&
#endif
		list->refcount == 0)
		CatCacheRemoveCList(list->my_cache, list);
}
/*
 * CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.  The new entry initially has refcount 0.
 *
 * To create a normal cache entry, ntp must be the HeapTuple just fetched
 * from scandesc, and "arguments" is not used.  To create a negative cache
 * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
 * In either case, hashValue/hashIndex are the hash values computed from
 * the cache keys.
 *
 * Returns NULL if we attempt to detoast the tuple and observe that it
 * became stale.  (This cannot happen for a negative entry.)  Caller must
 * retry the tuple lookup in that case.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
						uint32 hashValue, Index hashIndex)
{
	CatCTup    *ct;
	MemoryContext oldcxt;
	int			i;

	if (ntp)
	{
		HeapTuple	dtp = NULL;

		/*
		 * The invalidation of the in-progress entry essentially never
		 * happens during our regression tests, and there's no easy way to
		 * force it to fail for testing purposes.  To ensure we have test
		 * coverage for the retry paths in our callers, make debug builds
		 * randomly fail about 0.1% of the times through this code path, even
		 * when there's no toast pointers.
		 */
#ifdef USE_ASSERT_CHECKING
		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
			return NULL;
#endif

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line.  This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
		{
			CatCInProgress *save_in_progress;
			CatCInProgress in_progress_ent;

			/*
			 * The tuple could become stale while we are doing toast table
			 * access (since AcceptInvalidationMessages can run then).  The
			 * invalidation will mark our in-progress entry as dead.
			 */
			save_in_progress = catcache_in_progress_stack;
			in_progress_ent.next = catcache_in_progress_stack;
			in_progress_ent.cache = cache;
			in_progress_ent.hash_value = hashValue;
			in_progress_ent.list = false;
			in_progress_ent.dead = false;
			catcache_in_progress_stack = &in_progress_ent;

			PG_TRY();
			{
				dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
			}
			PG_FINALLY();
			{
				Assert(catcache_in_progress_stack == &in_progress_ent);
				catcache_in_progress_stack = save_in_progress;
			}
			PG_END_TRY();

			if (in_progress_ent.dead)
			{
				heap_freetuple(dtp);
				return NULL;
			}
		}
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		/* Set up keys for a negative cache entry */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if
		 * not by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = (ntp == NULL);
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array.
	 * Quite arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
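
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): positive entries are a single allocation, with
 * the tuple body placed at the first MAXALIGN'd boundary past the CatCTup
 * header, which is why t_data can be derived from the entry pointer alone.
 */
#ifdef CATCACHE_EXAMPLES
static HeapTupleHeader
example_entry_tuple_body(CatCTup *ct)
{
	return (HeapTupleHeader) MAXALIGN(((char *) ct) + sizeof(CatCTup));
}
#endif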
/*
 * Helper routine that frees keys stored in the keys array.
 */
static void
CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
{
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att;

		/* system attribute are not supported in caches */
		Assert(attnum > 0);

		att = TupleDescAttr(tupdesc, attnum - 1);

		if (!att->attbyval)
			pfree(DatumGetPointer(keys[i]));
	}
}
/*
 * Helper routine that copies the keys in the srckeys array into the dstkeys
 * one, guaranteeing that the datums are fully allocated in the current memory
 * context.
 */
static void
CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
				 Datum *srckeys, Datum *dstkeys)
{
	int			i;

	/*
	 * XXX: memory and lookup performance could possibly be improved by
	 * storing all keys in one allocation.
	 */

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
		Datum		src = srckeys[i];
		NameData	srcname;

		/*
		 * Must be careful in case the caller passed a C string where a NAME
		 * is wanted: convert the given argument to a correctly padded NAME.
		 * Otherwise the memcpy() done by datumCopy() could fall off the end
		 * of memory.
		 */
		if (att->atttypid == NAMEOID)
		{
			namestrcpy(&srcname, DatumGetCString(src));
			src = NameGetDatum(&srcname);
		}

		dstkeys[i] = datumCopy(src,
							   att->attbyval,
							   att->attlen);
	}
}
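
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard): the NAME special case above is what lets
 * SearchCatCache() callers pass a bare C string for a name key; a short
 * string is re-padded to the full NAMEDATALEN before datumCopy() reads
 * NAMEDATALEN bytes from it.
 */
#ifdef CATCACHE_EXAMPLES
static Datum
example_pad_name_key(const char *cstr)
{
	static NameData padded;		/* static only to keep the sketch short */

	namestrcpy(&padded, cstr);
	return NameGetDatum(&padded);
}
#endif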
/*
 *	PrepareToInvalidateCacheTuple()
 *
 *	This is part of a rather subtle chain of events, so pay attention:
 *
 *	When a tuple is inserted or deleted, it cannot be flushed from the
 *	catcaches immediately, for reasons explained at the top of cache/inval.c.
 *	Instead we have to add entry(s) for the tuple to a list of pending tuple
 *	invalidations that will be done at the end of the command or transaction.
 *
 *	The lists of tuples that need to be flushed are kept by inval.c.  This
 *	routine is a helper routine for inval.c.  Given a tuple belonging to
 *	the specified relation, find all catcaches it could be in, compute the
 *	correct hash value for each such catcache, and call the specified
 *	function to record the cache id and hash value in inval.c's lists.
 *	SysCacheInvalidate will be called later, if appropriate,
 *	using the recorded information.
 *
 *	For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *	For an update, we are called just once, with tuple being the old tuple
 *	version and newtuple the new version.  We should make two list entries
 *	if the tuple's hash value changed, but only one if it didn't.
 *
 *	Note that it is irrelevant whether the given tuple is actually loaded
 *	into the catcache at the moment.  Even if it's not there now, it might
 *	be by the end of the command, or there might be a matching negative entry
 *	to flush --- or other backends' caches might have such entries --- so
 *	we have to make list entries to flush it later.
 *
 *	Also note that it's not an error if there are no catcaches for the
 *	specified relation.  inval.c doesn't know exactly which rels have
 *	catcaches --- it will call this routine for any tuple that's in a
 *	system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
							  HeapTuple tuple,
							  HeapTuple newtuple,
							  void (*function) (int, uint32, Oid, void *),
							  void *context)
{
	slist_iter	iter;
	Oid			reloid;

	CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

	/*
	 * sanity checks
	 */
	Assert(RelationIsValid(relation));
	Assert(HeapTupleIsValid(tuple));
	Assert(PointerIsValid(function));
	Assert(CacheHdr != NULL);

	reloid = RelationGetRelid(relation);

	/* ----------------
	 *	for each cache
	 *	   if the cache contains tuples from the specified relation
	 *		   compute the tuple's hash value(s) in this cache,
	 *		   and call the passed function to register the information.
	 * ----------------
	 */

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
		uint32		hashvalue;
		Oid			dbid;

		if (ccp->cc_reloid != reloid)
			continue;

		/* Just in case cache hasn't finished initialization yet... */
		if (ccp->cc_tupdesc == NULL)
			CatalogCacheInitializeCache(ccp);

		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

		(*function) (ccp->id, hashvalue, dbid, context);

		if (newtuple)
		{
			uint32		newhashvalue;

			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);

			if (newhashvalue != hashvalue)
				(*function) (ccp->id, newhashvalue, dbid, context);
		}
	}
}
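
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * CATCACHE_EXAMPLES guard and callback name): the shape of the function
 * that inval.c passes in.  For each affected catcache it receives the cache
 * id, the key hash value, and the database OID (zero for shared catalogs),
 * and would queue them for SysCacheInvalidate() at end of command or
 * transaction.
 */
#ifdef CATCACHE_EXAMPLES
static void
example_register_inval(int cacheId, uint32 hashValue, Oid dbId, void *context)
{
	/* a real callback appends to inval.c's pending-invalidation lists */
	(void) cacheId;
	(void) hashValue;
	(void) dbId;
	(void) context;
}
#endif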
/* ResourceOwner callbacks */

static void
ResOwnerReleaseCatCache(Datum res)
{
	ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
}

static char *
ResOwnerPrintCatCache(Datum res)
{
	HeapTuple	tuple = (HeapTuple) DatumGetPointer(res);
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety check to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);

	return psprintf("cache %s (%d), tuple %u/%u has count %d",
					ct->my_cache->cc_relname, ct->my_cache->id,
					ItemPointerGetBlockNumber(&(tuple->t_self)),
					ItemPointerGetOffsetNumber(&(tuple->t_self)),
					ct->refcount);
}

static void
ResOwnerReleaseCatCacheList(Datum res)
{
	ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
}

static char *
ResOwnerPrintCatCacheList(Datum res)
{
	CatCList   *list = (CatCList *) DatumGetPointer(res);

	return psprintf("cache %s (%d), list %p has count %d",
					list->my_cache->cc_relname, list->my_cache->id,
					list, list->refcount);
}