/*-------------------------------------------------------------------------
 *
 * common.c
 *    Catalog routines used by pg_dump; long ago these were shared
 *    by another dump tool, but not anymore.
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/bin/pg_dump/common.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres_fe.h"

#include <ctype.h>

#include "catalog/pg_class_d.h"
#include "catalog/pg_collation_d.h"
#include "catalog/pg_extension_d.h"
#include "catalog/pg_namespace_d.h"
#include "catalog/pg_operator_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_publication_d.h"
#include "catalog/pg_subscription_d.h"
#include "catalog/pg_type_d.h"
#include "common/hashfn.h"
#include "pg_backup_utils.h"
#include "pg_dump.h"
/*
 * Variables for mapping DumpId to DumpableObject
 */
static DumpableObject **dumpIdMap = NULL;
static int  allocedDumpIds = 0;
static DumpId lastDumpId = 0;   /* Note: 0 is InvalidDumpId */
/*
 * Infrastructure for mapping CatalogId to DumpableObject
 *
 * We use a hash table generated by simplehash.h.  That infrastructure
 * requires all the hash table entries to be the same size, and it also
 * expects that it can move them around when resizing the table.  So we
 * cannot make the DumpableObjects be elements of the hash table directly;
 * instead, the hash table elements contain pointers to DumpableObjects.
 * This does have the advantage of letting us map multiple CatalogIds
 * to one DumpableObject, which is useful for blobs.
 *
 * It turns out to be convenient to also use this data structure to map
 * CatalogIds to owning extensions, if any.  Since extension membership
 * data is read before creating most DumpableObjects, either one of dobj
 * and ext could be NULL.
 */
typedef struct _catalogIdMapEntry
{
    CatalogId   catId;          /* the indexed CatalogId */
    uint32      status;         /* hash status */
    uint32      hashval;        /* hash code for the CatalogId */
    DumpableObject *dobj;       /* the associated DumpableObject, if any */
    ExtensionInfo *ext;         /* owning extension, if any */
} CatalogIdMapEntry;
#define SH_PREFIX       catalogid
#define SH_ELEMENT_TYPE CatalogIdMapEntry
#define SH_KEY_TYPE     CatalogId
#define SH_KEY          catId
#define SH_HASH_KEY(tb, key) hash_bytes((const unsigned char *) &(key), sizeof(CatalogId))
#define SH_EQUAL(tb, a, b) ((a).oid == (b).oid && (a).tableoid == (b).tableoid)
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) (a)->hashval
#define SH_SCOPE        static inline
#define SH_RAW_ALLOCATOR pg_malloc0
#define SH_DECLARE
#define SH_DEFINE
#include "lib/simplehash.h"
#define CATALOGIDHASH_INITIAL_SIZE  10000

static catalogid_hash *catalogIdHash = NULL;
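/*
 * Usage sketch (for illustration; it relies only on the simplehash API
 * generated by the parameterization above): simplehash.h emits the
 * catalogid_hash type plus catalogid_create(), catalogid_insert(), and
 * catalogid_lookup(), which are the only ways this file touches the table.
 * A typical probe, where someCatId is a hypothetical CatalogId, looks like:
 *
 *      CatalogIdMapEntry *entry;
 *      bool        found;
 *
 *      entry = catalogid_insert(catalogIdHash, someCatId, &found);
 *      if (!found)
 *      {
 *          entry->dobj = NULL;
 *          entry->ext = NULL;
 *      }
 *
 * AssignDumpId(), recordAdditionalCatalogID(), and
 * recordExtensionMembership() below all follow this pattern.
 */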
static void flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
                          InhInfo *inhinfo, int numInherits);
static void flagInhIndexes(Archive *fout, TableInfo *tblinfo, int numTables);
static void flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo,
                         int numTables);
static int  strInArray(const char *pattern, char **arr, int arr_size);
static IndxInfo *findIndexByOid(Oid oid);
/*
 * getSchemaData
 *    Collect information about all potentially dumpable objects
 */
TableInfo *
getSchemaData(Archive *fout, int *numTablesPtr)
{
    TableInfo  *tblinfo;
    ExtensionInfo *extinfo;
    InhInfo    *inhinfo;
    int         numTables;
    int         numExtensions;
    int         numInherits;
    /*
     * We must read extensions and extension membership info first, because
     * extension membership needs to be consultable during decisions about
     * whether other objects are to be dumped.
     */
    pg_log_info("reading extensions");
    extinfo = getExtensions(fout, &numExtensions);

    pg_log_info("identifying extension members");
    getExtensionMembership(fout, extinfo, numExtensions);
    pg_log_info("reading schemas");
    getNamespaces(fout);

    /*
     * getTables should be done as soon as possible, so as to minimize the
     * window between starting our transaction and acquiring per-table locks.
     * However, we have to do getNamespaces first because the tables get
     * linked to their containing namespaces during getTables.
     */
    pg_log_info("reading user-defined tables");
    tblinfo = getTables(fout, &numTables);

    getOwnedSeqs(fout, tblinfo, numTables);
    pg_log_info("reading user-defined functions");
    getFuncs(fout);

    /* this must be after getTables and getFuncs */
    pg_log_info("reading user-defined types");
    getTypes(fout);

    /* this must be after getFuncs, too */
    pg_log_info("reading procedural languages");
    getProcLangs(fout);

    pg_log_info("reading user-defined aggregate functions");
    getAggregates(fout);

    pg_log_info("reading user-defined operators");
    getOperators(fout);

    pg_log_info("reading user-defined access methods");
    getAccessMethods(fout);

    pg_log_info("reading user-defined operator classes");
    getOpclasses(fout);

    pg_log_info("reading user-defined operator families");
    getOpfamilies(fout);

    pg_log_info("reading user-defined text search parsers");
    getTSParsers(fout);

    pg_log_info("reading user-defined text search templates");
    getTSTemplates(fout);

    pg_log_info("reading user-defined text search dictionaries");
    getTSDictionaries(fout);

    pg_log_info("reading user-defined text search configurations");
    getTSConfigurations(fout);

    pg_log_info("reading user-defined foreign-data wrappers");
    getForeignDataWrappers(fout);

    pg_log_info("reading user-defined foreign servers");
    getForeignServers(fout);

    pg_log_info("reading default privileges");
    getDefaultACLs(fout);

    pg_log_info("reading user-defined collations");
    getCollations(fout);

    pg_log_info("reading user-defined conversions");
    getConversions(fout);

    pg_log_info("reading type casts");
    getCasts(fout);

    pg_log_info("reading transforms");
    getTransforms(fout);
    pg_log_info("reading table inheritance information");
    inhinfo = getInherits(fout, &numInherits);

    pg_log_info("reading event triggers");
    getEventTriggers(fout);

    /* Identify extension configuration tables that should be dumped */
    pg_log_info("finding extension tables");
    processExtensionTables(fout, extinfo, numExtensions);

    /* Link tables to parents, mark parents of target tables interesting */
    pg_log_info("finding inheritance relationships");
    flagInhTables(fout, tblinfo, numTables, inhinfo, numInherits);

    pg_log_info("reading column info for interesting tables");
    getTableAttrs(fout, tblinfo, numTables);

    pg_log_info("flagging inherited columns in subtables");
    flagInhAttrs(fout, fout->dopt, tblinfo, numTables);

    pg_log_info("reading partitioning data");
    getPartitioningInfo(fout);

    pg_log_info("reading indexes");
    getIndexes(fout, tblinfo, numTables);

    pg_log_info("flagging indexes in partitioned tables");
    flagInhIndexes(fout, tblinfo, numTables);

    pg_log_info("reading extended statistics");
    getExtendedStatistics(fout);

    pg_log_info("reading constraints");
    getConstraints(fout, tblinfo, numTables);

    pg_log_info("reading triggers");
    getTriggers(fout, tblinfo, numTables);
    pg_log_info("reading rewrite rules");
    getRules(fout);
    pg_log_info("reading policies");
    getPolicies(fout, tblinfo, numTables);

    pg_log_info("reading publications");
    getPublications(fout);

    pg_log_info("reading publication membership of tables");
    getPublicationTables(fout, tblinfo, numTables);

    pg_log_info("reading publication membership of schemas");
    getPublicationNamespaces(fout);

    pg_log_info("reading subscriptions");
    getSubscriptions(fout);

    pg_log_info("reading subscription membership of tables");
    getSubscriptionTables(fout);

    free(inhinfo);              /* not needed any longer */
    *numTablesPtr = numTables;
    return tblinfo;
}
/* flagInhTables -
 *   Fill in parent link fields of tables for which we need that information,
 *   mark parents of target tables as interesting, and create
 *   TableAttachInfo objects for partitioned tables with appropriate
 *   dependency links.
 *
 * Note that only direct ancestors of targets are marked interesting.
 * This is sufficient; we don't much care whether they inherited their
 * attributes or not.
 *
 * modifies tblinfo
 */
static void
flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
              InhInfo *inhinfo, int numInherits)
{
    TableInfo  *child = NULL;
    TableInfo  *parent = NULL;
    int         i,
                j;
    /*
     * Set up links from child tables to their parents.
     *
     * We used to attempt to skip this work for tables that are not to be
     * dumped; but the optimizable cases are rare in practice, and setting up
     * these links in bulk is cheaper than the old way.  (Note in particular
     * that it's very rare for a child to have more than one parent.)
     */
    for (i = 0; i < numInherits; i++)
    {
        /*
         * Skip a hashtable lookup if it's same table as last time.  This is
         * unlikely for the child, but less so for the parent.  (Maybe we
         * should ask the backend for a sorted array to make it more likely?
         * Not clear the sorting effort would be repaid, though.)
         */
        if (child == NULL ||
            child->dobj.catId.oid != inhinfo[i].inhrelid)
        {
            child = findTableByOid(inhinfo[i].inhrelid);

            /*
             * If we find no TableInfo, assume the pg_inherits entry is for a
             * partitioned index, which we don't need to track.
             */
            if (child == NULL)
                continue;
        }
        if (parent == NULL ||
            parent->dobj.catId.oid != inhinfo[i].inhparent)
        {
            parent = findTableByOid(inhinfo[i].inhparent);
            if (parent == NULL)
                pg_fatal("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
                         inhinfo[i].inhparent,
                         child->dobj.name,
                         child->dobj.catId.oid);
        }
        /* Add this parent to the child's list of parents. */
        if (child->numParents > 0)
            child->parents = pg_realloc_array(child->parents,
                                              TableInfo *,
                                              child->numParents + 1);
        else
            child->parents = pg_malloc_array(TableInfo *, 1);
        child->parents[child->numParents++] = parent;
    }
    /*
     * Now consider all child tables and mark parents interesting as needed.
     */
    for (i = 0; i < numTables; i++)
    {
        /*
         * If needed, mark the parents as interesting for getTableAttrs and
         * getIndexes.  We only need this for direct parents of dumpable
         * tables.
         */
        if (tblinfo[i].dobj.dump)
        {
            int         numParents = tblinfo[i].numParents;
            TableInfo **parents = tblinfo[i].parents;

            for (j = 0; j < numParents; j++)
                parents[j]->interesting = true;
        }
        /* Create TableAttachInfo object if needed */
        if ((tblinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
            tblinfo[i].ispartition)
        {
            TableAttachInfo *attachinfo;

            /* With partitions there can only be one parent */
            if (tblinfo[i].numParents != 1)
                pg_fatal("invalid number of parents %d for table \"%s\"",
                         tblinfo[i].numParents,
                         tblinfo[i].dobj.name);
            attachinfo = (TableAttachInfo *) palloc(sizeof(TableAttachInfo));
            attachinfo->dobj.objType = DO_TABLE_ATTACH;
            attachinfo->dobj.catId.tableoid = 0;
            attachinfo->dobj.catId.oid = 0;
            AssignDumpId(&attachinfo->dobj);
            attachinfo->dobj.name = pg_strdup(tblinfo[i].dobj.name);
            attachinfo->dobj.namespace = tblinfo[i].dobj.namespace;
            attachinfo->parentTbl = tblinfo[i].parents[0];
            attachinfo->partitionTbl = &tblinfo[i];
            /*
             * We must state the DO_TABLE_ATTACH object's dependencies
             * explicitly, since it will not match anything in pg_depend.
             *
             * Give it dependencies on both the partition table and the parent
             * table, so that it will not be executed till both of those
             * exist.  (There's no need to care what order those are created
             * in.)
             */
            addObjectDependency(&attachinfo->dobj, tblinfo[i].dobj.dumpId);
            addObjectDependency(&attachinfo->dobj, tblinfo[i].parents[0]->dobj.dumpId);
        }
    }
}
/*
 * flagInhIndexes -
 *   Create IndexAttachInfo objects for partitioned indexes, and add
 *   appropriate dependency links.
 */
static void
flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
{
    int         i,
                j;
    for (i = 0; i < numTables; i++)
    {
        if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
            continue;

        Assert(tblinfo[i].numParents == 1);
        for (j = 0; j < tblinfo[i].numIndexes; j++)
        {
            IndxInfo   *index = &(tblinfo[i].indexes[j]);
            IndxInfo   *parentidx;
            IndexAttachInfo *attachinfo;

            if (index->parentidx == 0)
                continue;

            parentidx = findIndexByOid(index->parentidx);
            if (parentidx == NULL)
                continue;

            attachinfo = pg_malloc_object(IndexAttachInfo);
            attachinfo->dobj.objType = DO_INDEX_ATTACH;
            attachinfo->dobj.catId.tableoid = 0;
            attachinfo->dobj.catId.oid = 0;
            AssignDumpId(&attachinfo->dobj);
            attachinfo->dobj.name = pg_strdup(index->dobj.name);
            attachinfo->dobj.namespace = index->indextable->dobj.namespace;
            attachinfo->parentIdx = parentidx;
            attachinfo->partitionIdx = index;
            /*
             * We must state the DO_INDEX_ATTACH object's dependencies
             * explicitly, since it will not match anything in pg_depend.
             *
             * Give it dependencies on both the partition index and the parent
             * index, so that it will not be executed till both of those
             * exist.  (There's no need to care what order those are created
             * in.)
             *
             * In addition, give it dependencies on the indexes' underlying
             * tables.  This does nothing of great value so far as serial
             * restore ordering goes, but it ensures that a parallel restore
             * will not try to run the ATTACH concurrently with other
             * operations on those tables.
             */
            addObjectDependency(&attachinfo->dobj, index->dobj.dumpId);
            addObjectDependency(&attachinfo->dobj, parentidx->dobj.dumpId);
            addObjectDependency(&attachinfo->dobj,
                                index->indextable->dobj.dumpId);
            addObjectDependency(&attachinfo->dobj,
                                parentidx->indextable->dobj.dumpId);
            /* keep track of the list of partitions in the parent index */
            simple_ptr_list_append(&parentidx->partattaches, &attachinfo->dobj);
        }
    }
}
/* flagInhAttrs -
 *   for each dumpable table in tblinfo, flag its inherited attributes
 *
 * What we need to do here is:
 *
 * - Detect child columns that inherit NOT NULL bits from their parents, so
 *   that we needn't specify that again for the child. (Versions >= 18 no
 *   longer need this.)
 *
 * - Detect child columns that have DEFAULT NULL when their parents had some
 *   non-null default.  In this case, we make up a dummy AttrDefInfo object so
 *   that we'll correctly emit the necessary DEFAULT NULL clause; otherwise
 *   the backend will apply an inherited default to the column.
 *
 * - Detect child columns that have a generation expression and all their
 *   parents also have the same generation expression, and if so suppress the
 *   child's expression.  The child will inherit the generation expression
 *   automatically, so there's no need to dump it.  This improves the dump's
 *   compatibility with pre-v16 servers, which didn't allow the child's
 *   expression to be given explicitly.  Exceptions: If it's a partition or
 *   we are in binary upgrade mode, we dump such expressions anyway because
 *   in those cases inherited tables are recreated standalone first and then
 *   reattached to the parent.  (See also the logic in dumpTableSchema().)
 *
 * modifies tblinfo
 */
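/*
 * Sketch of the DEFAULT NULL case described above, using a hypothetical
 * schema (and assuming the usual copying of parent defaults at child
 * creation):
 *
 *      CREATE TABLE parent (a int DEFAULT 1);
 *      CREATE TABLE child () INHERITS (parent);
 *      ALTER TABLE child ALTER COLUMN a DROP DEFAULT;
 *
 * Here the child column ends up with no pg_attrdef entry (its attrdefs[]
 * slot is NULL) while a parent still has a non-null default, so
 * foundDefault gets set below.  Recreating the child at restore time with
 * no explicit default would let it re-inherit DEFAULT 1; the manufactured
 * AttrDefInfo emits an explicit DEFAULT NULL to preserve the original
 * behavior.
 */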
static void
flagInhAttrs(Archive *fout, DumpOptions *dopt, TableInfo *tblinfo, int numTables)
{
    int         i,
                j,
                k;
    /*
     * We scan the tables in OID order, since that's how tblinfo[] is sorted.
     * Hence we will typically visit parents before their children --- but
     * that is *not* guaranteed.  Thus this loop must be careful that it does
     * not alter table properties in a way that could change decisions made at
     * child tables during other iterations.
     */
    for (i = 0; i < numTables; i++)
    {
        TableInfo  *tbinfo = &(tblinfo[i]);
        int         numParents;
        TableInfo **parents;

        /* Some kinds never have parents */
        if (tbinfo->relkind == RELKIND_SEQUENCE ||
            tbinfo->relkind == RELKIND_VIEW ||
            tbinfo->relkind == RELKIND_MATVIEW)
            continue;

        /* Don't bother computing anything for non-target tables, either */
        if (!tbinfo->dobj.dump)
            continue;

        numParents = tbinfo->numParents;
        parents = tbinfo->parents;

        if (numParents == 0)
            continue;           /* nothing to see here, move along */
        /* For each column, search for matching column names in parent(s) */
        for (j = 0; j < tbinfo->numatts; j++)
        {
            bool        foundNotNull;   /* Attr was NOT NULL in a parent */
            bool        foundDefault;   /* Found a default in a parent */
            bool        foundSameGenerated; /* Found matching GENERATED */
            bool        foundDiffGenerated; /* Found non-matching GENERATED */

            /* no point in examining dropped columns */
            if (tbinfo->attisdropped[j])
                continue;

            foundNotNull = false;
            foundDefault = false;
            foundSameGenerated = false;
            foundDiffGenerated = false;
            for (k = 0; k < numParents; k++)
            {
                TableInfo  *parent = parents[k];
                int         inhAttrInd;
                inhAttrInd = strInArray(tbinfo->attnames[j],
                                        parent->attnames,
                                        parent->numatts);
                if (inhAttrInd >= 0)
                {
                    AttrDefInfo *parentDef = parent->attrdefs[inhAttrInd];
                    /*
                     * Account for each parent having a not-null constraint.
                     * In versions 18 and later, we don't need this (and those
                     * didn't have NO INHERIT.)
                     */
                    if (fout->remoteVersion < 180000 &&
                        parent->notnull_constrs[inhAttrInd] != NULL)
                        foundNotNull = true;

                    foundDefault |= (parentDef != NULL &&
                                     strcmp(parentDef->adef_expr, "NULL") != 0 &&
                                     !parent->attgenerated[inhAttrInd]);
                    if (parent->attgenerated[inhAttrInd])
                    {
                        /* these pointer nullness checks are just paranoia */
                        if (parentDef != NULL &&
                            tbinfo->attrdefs[j] != NULL &&
                            strcmp(parentDef->adef_expr,
                                   tbinfo->attrdefs[j]->adef_expr) == 0)
                            foundSameGenerated = true;
                        else
                            foundDiffGenerated = true;
                    }
                }
            }
            /*
             * In versions < 18, for lack of a better system, we arbitrarily
             * decide that a not-null constraint is not locally defined if at
             * least one of the parents has it.
             */
            if (fout->remoteVersion < 180000 && foundNotNull)
                tbinfo->notnull_islocal[j] = false;
            /*
             * Manufacture a DEFAULT NULL clause if necessary.  This breaks
             * the advice given above to avoid changing state that might get
             * inspected in other loop iterations.  We prevent trouble by
             * having the foundDefault test above check whether adef_expr is
             * "NULL", so that it will reach the same conclusion before or
             * after this is done.
             */
            if (foundDefault && tbinfo->attrdefs[j] == NULL)
            {
                AttrDefInfo *attrDef;

                attrDef = pg_malloc_object(AttrDefInfo);
                attrDef->dobj.objType = DO_ATTRDEF;
                attrDef->dobj.catId.tableoid = 0;
                attrDef->dobj.catId.oid = 0;
                AssignDumpId(&attrDef->dobj);
                attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
                attrDef->dobj.namespace = tbinfo->dobj.namespace;
                attrDef->dobj.dump = tbinfo->dobj.dump;

                attrDef->adtable = tbinfo;
                attrDef->adnum = j + 1;
                attrDef->adef_expr = pg_strdup("NULL");
                /* Will column be dumped explicitly? */
                if (shouldPrintColumn(dopt, tbinfo, j))
                {
                    attrDef->separate = false;
                    /* No dependency needed: NULL cannot have dependencies */
                }
                else
                {
                    /* column will be suppressed, print default separately */
                    attrDef->separate = true;
                    /* ensure it comes out after the table */
                    addObjectDependency(&attrDef->dobj,
                                        tbinfo->dobj.dumpId);
                }
                tbinfo->attrdefs[j] = attrDef;
            }
            /* No need to dump generation expression if it's inheritable */
            if (foundSameGenerated && !foundDiffGenerated &&
                !tbinfo->ispartition && !dopt->binary_upgrade)
                tbinfo->attrdefs[j]->dobj.dump = DUMP_COMPONENT_NONE;
        }
    }
}
/*
 * AssignDumpId
 *    Given a newly-created dumpable object, assign a dump ID,
 *    and enter the object into the lookup tables.
 *
 * The caller is expected to have filled in objType and catId,
 * but not any of the other standard fields of a DumpableObject.
 */
void
AssignDumpId(DumpableObject *dobj)
{
    dobj->dumpId = ++lastDumpId;
    dobj->name = NULL;          /* must be set later */
    dobj->namespace = NULL;     /* may be set later */
    dobj->dump = DUMP_COMPONENT_ALL;    /* default assumption */
    dobj->dump_contains = DUMP_COMPONENT_ALL;   /* default assumption */
    /* All objects have definitions; we may set more components bits later */
    dobj->components = DUMP_COMPONENT_DEFINITION;
    dobj->ext_member = false;   /* default assumption */
    dobj->depends_on_ext = false;   /* default assumption */
    dobj->dependencies = NULL;
    dobj->nDeps = 0;
    dobj->allocDeps = 0;
    /* Add object to dumpIdMap[], enlarging that array if need be */
    while (dobj->dumpId >= allocedDumpIds)
    {
        int         newAlloc;

        if (allocedDumpIds <= 0)
        {
            newAlloc = 256;
            dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
        }
        else
        {
            newAlloc = allocedDumpIds * 2;
            dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
        }
        memset(dumpIdMap + allocedDumpIds, 0,
               (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
        allocedDumpIds = newAlloc;
    }
    dumpIdMap[dobj->dumpId] = dobj;
    /* If it has a valid CatalogId, enter it into the hash table */
    if (OidIsValid(dobj->catId.tableoid))
    {
        CatalogIdMapEntry *entry;
        bool        found;

        /* Initialize CatalogId hash table if not done yet */
        if (catalogIdHash == NULL)
            catalogIdHash = catalogid_create(CATALOGIDHASH_INITIAL_SIZE, NULL);

        entry = catalogid_insert(catalogIdHash, dobj->catId, &found);
        if (!found)
        {
            entry->dobj = NULL;
            entry->ext = NULL;
        }
        Assert(entry->dobj == NULL);
        entry->dobj = dobj;
    }
}
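/*
 * Typical AssignDumpId() call sequence (sketch only, mirroring what
 * flagInhTables() and flagInhAttrs() above actually do): the caller fills
 * in objType and catId first, calls AssignDumpId() to obtain a dumpId and
 * register the object, and only then fills in name and the other fields.
 * "obj" here is a hypothetical pointer to any DumpableObject-bearing struct:
 *
 *      obj->dobj.objType = DO_TABLE_ATTACH;
 *      obj->dobj.catId.tableoid = 0;
 *      obj->dobj.catId.oid = 0;
 *      AssignDumpId(&obj->dobj);
 *      obj->dobj.name = pg_strdup(...);
 */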
/*
 * recordAdditionalCatalogID
 *    Record an additional catalog ID for the given DumpableObject
 */
void
recordAdditionalCatalogID(CatalogId catId, DumpableObject *dobj)
{
    CatalogIdMapEntry *entry;
    bool        found;

    /* CatalogId hash table must exist, if we have a DumpableObject */
    Assert(catalogIdHash != NULL);

    /* Add reference to CatalogId hash */
    entry = catalogid_insert(catalogIdHash, catId, &found);
    if (!found)
    {
        entry->dobj = NULL;
        entry->ext = NULL;
    }
    Assert(entry->dobj == NULL);
    entry->dobj = dobj;
}
/*
 * Assign a DumpId that's not tied to a DumpableObject.
 *
 * This is used when creating a "fixed" ArchiveEntry that doesn't need to
 * participate in the sorting logic.
 */
DumpId
createDumpId(void)
{
    return ++lastDumpId;
}

/*
 * Return the largest DumpId so far assigned
 */
DumpId
getMaxDumpId(void)
{
    return lastDumpId;
}
/*
 * Find a DumpableObject by dump ID
 *
 * Returns NULL for invalid ID
 */
DumpableObject *
findObjectByDumpId(DumpId dumpId)
{
    if (dumpId <= 0 || dumpId >= allocedDumpIds)
        return NULL;            /* out of range? */
    return dumpIdMap[dumpId];
}
/*
 * Find a DumpableObject by catalog ID
 *
 * Returns NULL for unknown ID
 */
DumpableObject *
findObjectByCatalogId(CatalogId catalogId)
{
    CatalogIdMapEntry *entry;

    if (catalogIdHash == NULL)
        return NULL;            /* no objects exist yet */

    entry = catalogid_lookup(catalogIdHash, catalogId);
    if (entry == NULL)
        return NULL;            /* no match */
    return entry->dobj;
}
/*
 * Build an array of pointers to all known dumpable objects
 *
 * This simply creates a modifiable copy of the internal map.
 */
void
getDumpableObjects(DumpableObject ***objs, int *numObjs)
{
    int         i,
                j;

    *objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
    j = 0;
    for (i = 1; i < allocedDumpIds; i++)
    {
        if (dumpIdMap[i])
            (*objs)[j++] = dumpIdMap[i];
    }
    *numObjs = j;
}
/*
 * Add a dependency link to a DumpableObject
 *
 * Note: duplicate dependencies are currently not eliminated
 */
void
addObjectDependency(DumpableObject *dobj, DumpId refId)
{
    if (dobj->nDeps >= dobj->allocDeps)
    {
        if (dobj->allocDeps <= 0)
        {
            dobj->allocDeps = 16;
            dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
        }
        else
        {
            dobj->allocDeps *= 2;
            dobj->dependencies = pg_realloc_array(dobj->dependencies,
                                                  DumpId, dobj->allocDeps);
        }
    }
    dobj->dependencies[dobj->nDeps++] = refId;
}
/*
 * Remove a dependency link from a DumpableObject
 *
 * If there are multiple links, all are removed
 */
void
removeObjectDependency(DumpableObject *dobj, DumpId refId)
{
    int         i;
    int         j = 0;

    for (i = 0; i < dobj->nDeps; i++)
    {
        if (dobj->dependencies[i] != refId)
            dobj->dependencies[j++] = dobj->dependencies[i];
    }
    dobj->nDeps = j;
}
/*
 * findTableByOid
 *    finds the DumpableObject for the table with the given oid
 *    returns NULL if not found
 */
TableInfo *
findTableByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = RelationRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_TABLE);
    return (TableInfo *) dobj;
}
/*
 * findIndexByOid
 *    finds the DumpableObject for the index with the given oid
 *    returns NULL if not found
 */
static IndxInfo *
findIndexByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = RelationRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_INDEX);
    return (IndxInfo *) dobj;
}
/*
 * findTypeByOid
 *    finds the DumpableObject for the type with the given oid
 *    returns NULL if not found
 */
TypeInfo *
findTypeByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = TypeRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL ||
           dobj->objType == DO_TYPE || dobj->objType == DO_DUMMY_TYPE);
    return (TypeInfo *) dobj;
}
/*
 * findFuncByOid
 *    finds the DumpableObject for the function with the given oid
 *    returns NULL if not found
 */
FuncInfo *
findFuncByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = ProcedureRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_FUNC);
    return (FuncInfo *) dobj;
}
/*
 * findOprByOid
 *    finds the DumpableObject for the operator with the given oid
 *    returns NULL if not found
 */
OprInfo *
findOprByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = OperatorRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_OPERATOR);
    return (OprInfo *) dobj;
}
/*
 * findCollationByOid
 *    finds the DumpableObject for the collation with the given oid
 *    returns NULL if not found
 */
CollInfo *
findCollationByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = CollationRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_COLLATION);
    return (CollInfo *) dobj;
}
/*
 * findNamespaceByOid
 *    finds the DumpableObject for the namespace with the given oid
 *    returns NULL if not found
 */
NamespaceInfo *
findNamespaceByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = NamespaceRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_NAMESPACE);
    return (NamespaceInfo *) dobj;
}
/*
 * findExtensionByOid
 *    finds the DumpableObject for the extension with the given oid
 *    returns NULL if not found
 */
ExtensionInfo *
findExtensionByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = ExtensionRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_EXTENSION);
    return (ExtensionInfo *) dobj;
}
/*
 * findPublicationByOid
 *    finds the DumpableObject for the publication with the given oid
 *    returns NULL if not found
 */
PublicationInfo *
findPublicationByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = PublicationRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_PUBLICATION);
    return (PublicationInfo *) dobj;
}
/*
 * findSubscriptionByOid
 *    finds the DumpableObject for the subscription with the given oid
 *    returns NULL if not found
 */
SubscriptionInfo *
findSubscriptionByOid(Oid oid)
{
    CatalogId   catId;
    DumpableObject *dobj;

    catId.tableoid = SubscriptionRelationId;
    catId.oid = oid;
    dobj = findObjectByCatalogId(catId);
    Assert(dobj == NULL || dobj->objType == DO_SUBSCRIPTION);
    return (SubscriptionInfo *) dobj;
}
/*
 * recordExtensionMembership
 *    Record that the object identified by the given catalog ID
 *    belongs to the given extension
 */
void
recordExtensionMembership(CatalogId catId, ExtensionInfo *ext)
{
    CatalogIdMapEntry *entry;
    bool        found;

    /* CatalogId hash table must exist, if we have an ExtensionInfo */
    Assert(catalogIdHash != NULL);

    /* Add reference to CatalogId hash */
    entry = catalogid_insert(catalogIdHash, catId, &found);
    if (!found)
    {
        entry->dobj = NULL;
        entry->ext = NULL;
    }
    Assert(entry->ext == NULL);
    entry->ext = ext;
}
/*
 * findOwningExtension
 *    return owning extension for specified catalog ID, or NULL if none
 */
ExtensionInfo *
findOwningExtension(CatalogId catalogId)
{
    CatalogIdMapEntry *entry;

    if (catalogIdHash == NULL)
        return NULL;            /* no objects exist yet */

    entry = catalogid_lookup(catalogIdHash, catalogId);
    if (entry == NULL)
        return NULL;            /* no match */
    return entry->ext;
}
/*
 * parseOidArray
 *    parse a string of numbers delimited by spaces into an Oid array
 *
 * Note: actually this is used for both Oids and potentially-signed
 * attribute numbers.  This should cause no trouble, but we could split
 * the function into two functions with different argument types if it does.
 */
void
parseOidArray(const char *str, Oid *array, int arraysize)
{
    int         argNum;
    int         j;
    char        temp[100];
    char        s;

    argNum = 0;
    j = 0;
    for (;;)
    {
        s = *str++;
        if (s == ' ' || s == '\0')
        {
            if (j > 0)
            {
                if (argNum >= arraysize)
                    pg_fatal("could not parse numeric array \"%s\": too many numbers", str);
                temp[j] = '\0';
                array[argNum++] = atooid(temp);
                j = 0;
            }
            if (s == '\0')
                break;
        }
        else
        {
            if (!(isdigit((unsigned char) s) || s == '-') ||
                j >= sizeof(temp) - 1)
                pg_fatal("could not parse numeric array \"%s\": invalid character in number", str);
            temp[j++] = s;
        }
    }

    while (argNum < arraysize)
        array[argNum++] = InvalidOid;
}
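/*
 * Example (illustrative): with arraysize = 4, the call
 *
 *      parseOidArray("1259 2200 0", array, 4);
 *
 * stores {1259, 2200, 0} in array[0..2] and pads the remaining slot with
 * InvalidOid, matching the trailing loop above.
 */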
/*
 * strInArray:
 *    takes in a string and a string array and the number of elements in the
 *    array, and returns the index if the string is somewhere in the array,
 *    -1 otherwise
 */
static int
strInArray(const char *pattern, char **arr, int arr_size)
{
    int         i;

    for (i = 0; i < arr_size; i++)
    {
        if (strcmp(pattern, arr[i]) == 0)
            return i;
    }
    return -1;
}