/*
 *	version.c
 *
 *	Postgres-version-specific routines
 *
 *	Copyright (c) 2010-2021, PostgreSQL Global Development Group
 *	src/bin/pg_upgrade/version.c
 */

#include "postgres_fe.h"

#include "catalog/pg_class_d.h"
#include "fe_utils/string_utils.h"
#include "pg_upgrade.h"
/*
 * new_9_0_populate_pg_largeobject_metadata()
 *	new >= 9.0, old <= 8.4
 *	9.0 has a new pg_largeobject permission table
 */
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for large objects");

	snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find if there are any large objects */
		res = executeQueryOrDie(conn,
								"SELECT count(*) "
								"FROM pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				PQExpBufferData connectbuf;

				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));

				initPQExpBuffer(&connectbuf);
				appendPsqlMetaConnect(&connectbuf, active_db->db_name);
				fputs(connectbuf.data, script);
				termPQExpBuffer(&connectbuf);

				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects. The new database has an\n"
				   "additional large object permission table. After upgrading, you will be\n"
				   "given a command to populate the pg_largeobject_metadata table with\n"
				   "default permissions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects. The new database has an\n"
				   "additional large object permission table, so default permissions must be\n"
				   "defined for all large objects. The file\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will set the default\n"
				   "permissions.\n\n",
				   output_path);
	}
	else
		check_ok();
}
/*
 * check_for_data_types_usage()
 *	Detect whether there are any stored columns depending on given type(s)
 *
 * If so, write a report to the given file name, and return true.
 *
 * base_query should be a SELECT yielding a single column named "oid",
 * containing the pg_type OIDs of one or more types that are known to have
 * inconsistent on-disk representations across server versions.
 *
 * We check for the type(s) in tables, matviews, and indexes, but not views;
 * there's no storage involved in a view.
 */
bool
check_for_data_types_usage(ClusterInfo *cluster,
						   const char *base_query,
						   const char *output_path)
{
	bool		found = false;
	FILE	   *script = NULL;
	int			dbnum;

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
		PQExpBufferData querybuf;
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;

		/*
		 * The type(s) of interest might be wrapped in a domain, array,
		 * composite, or range, and these container types can be nested (to
		 * varying extents depending on server version, but that's not of
		 * concern here). To handle all these cases we need a recursive CTE.
		 */
		initPQExpBuffer(&querybuf);
		appendPQExpBuffer(&querybuf,
						  "WITH RECURSIVE oids AS ( "
		/* start with the type(s) returned by base_query */
						  "	%s "
						  "	UNION ALL "
						  "	SELECT oid FROM ( "
		/* inner WITH because we can only reference the CTE once */
						  "		WITH x AS (SELECT oid FROM oids) "
		/* domains on any type selected so far */
						  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
						  "			UNION ALL "
		/* arrays over any type selected so far */
						  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
						  "			UNION ALL "
		/* composite types containing any type selected so far */
						  "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
						  "			WHERE t.typtype = 'c' AND "
						  "				  t.oid = c.reltype AND "
						  "				  c.oid = a.attrelid AND "
						  "				  NOT a.attisdropped AND "
						  "				  a.atttypid = x.oid ",
						  base_query);

		/* Ranges were introduced in 9.2 */
		if (GET_MAJOR_VERSION(cluster->major_version) >= 902)
			appendPQExpBufferStr(&querybuf,
								 "			UNION ALL "
			/* ranges containing any type selected so far */
								 "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
								 "			WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid");

		appendPQExpBufferStr(&querybuf,
							 "	) foo "
							 ") "
		/* now look for stored columns of any such type */
							 "SELECT n.nspname, c.relname, a.attname "
							 "FROM	pg_catalog.pg_class c, "
							 "		pg_catalog.pg_namespace n, "
							 "		pg_catalog.pg_attribute a "
							 "WHERE	c.oid = a.attrelid AND "
							 "		NOT a.attisdropped AND "
							 "		a.atttypid IN (SELECT oid FROM oids) AND "
							 "		c.relkind IN ("
							 CppAsString2(RELKIND_RELATION) ", "
							 CppAsString2(RELKIND_MATVIEW) ", "
							 CppAsString2(RELKIND_INDEX) ") AND "
							 "		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
							 "		n.nspname !~ '^pg_temp_' AND "
							 "		n.nspname !~ '^pg_toast_temp_' AND "
		/* exclude system catalogs, too */
							 "		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		res = executeQueryOrDie(conn, "%s", querybuf.data);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				fprintf(script, "In database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		termPQExpBuffer(&querybuf);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	return found;
}
/*
 * check_for_data_type_usage()
 *	Detect whether there are any stored columns depending on the given type
 *
 * If so, write a report to the given file name, and return true.
 *
 * type_name should be a fully qualified type name. This is just a
 * trivial wrapper around check_for_data_types_usage() to convert a
 * type name into a base query.
 */
bool
check_for_data_type_usage(ClusterInfo *cluster,
						  const char *type_name,
						  const char *output_path)
{
	bool		found;
	char	   *base_query;

	base_query = psprintf("SELECT '%s'::pg_catalog.regtype AS oid",
						  type_name);

	found = check_for_data_types_usage(cluster, base_query, output_path);

	free(base_query);

	return found;
}
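/*
 * For illustration, a check on a single type such as "pg_catalog.line"
 * (see old_9_3_check_for_line_data_type_usage() below) ends up invoking
 * check_for_data_types_usage() with the base query:
 *
 *		SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid
 */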
/*
 * old_9_3_check_for_line_data_type_usage()
 *	9.3 -> 9.4
 *	Fully implement the 'line' data type in 9.4, which previously returned
 *	"not enabled" by default and was only functionally enabled with a
 *	compile-time switch; as of 9.4 "line" has a different on-disk
 *	representation format.
 */
void
old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for incompatible \"line\" data type");

	snprintf(output_path, sizeof(output_path), "tables_using_line.txt");

	if (check_for_data_type_usage(cluster, "pg_catalog.line", output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"line\" data type in user tables.\n"
				 "This data type changed its internal and input/output format\n"
				 "between your old and new versions so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
/*
 * old_9_6_check_for_unknown_data_type_usage()
 *	9.6 -> 10
 *	It's no longer allowed to create tables or views with "unknown"-type
 *	columns. We do not complain about views with such columns, because
 *	they should get silently converted to "text" columns during the DDL
 *	dump and reload; it seems unlikely to be worth making users do that
 *	by hand. However, if there's a table with such a column, the DDL
 *	reload will fail, so we should pre-detect that rather than failing
 *	mid-upgrade. Worse, if there's a matview with such a column, the
 *	DDL reload will silently change it to "text" which won't match the
 *	on-disk storage (which is like "cstring"). So we *must* reject that.
 */
void
old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"unknown\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_unknown.txt");

	if (check_for_data_type_usage(cluster, "pg_catalog.unknown", output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"unknown\" data type in user tables.\n"
				 "This data type is no longer allowed in tables, so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
/*
 * old_9_6_invalidate_hash_indexes()
 *	9.6 -> 10
 *	Hash index binary format has changed from 9.6->10.0
 */
void
old_9_6_invalidate_hash_indexes(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "reindex_hash.sql";

	prep_status("Checking for hash indexes");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find hash indexes */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_index i, "
								"		pg_catalog.pg_am a, "
								"		pg_catalog.pg_namespace n "
								"WHERE	i.indexrelid = c.oid AND "
								"		c.relam = a.oid AND "
								"		c.relnamespace = n.oid AND "
								"		a.amname = 'hash'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));
				if (!db_used)
				{
					PQExpBufferData connectbuf;

					initPQExpBuffer(&connectbuf);
					appendPsqlMetaConnect(&connectbuf, active_db->db_name);
					fputs(connectbuf.data, script);
					termPQExpBuffer(&connectbuf);
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && db_used)
		{
			/* mark hash indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET	indisvalid = false "
									  "FROM	pg_catalog.pg_class c, "
									  "		pg_catalog.pg_am a, "
									  "		pg_catalog.pg_namespace n "
									  "WHERE	i.indexrelid = c.oid AND "
									  "		c.relam = a.oid AND "
									  "		c.relnamespace = n.oid AND "
									  "		a.amname = 'hash'"));
		}

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes. These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command. After upgrading, you will be given\n"
				   "REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes. These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command. The file\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will recreate all invalid\n"
				   "indexes; until then, none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok();
}
/*
 * old_11_check_for_sql_identifier_data_type_usage()
 *	11 -> 12
 *	In 12, the sql_identifier data type was switched from name to varchar,
 *	which does affect the storage (name is by-ref, but not varlena). This
 *	means user tables using sql_identifier for columns are broken because
 *	the on-disk format is different.
 */
void
old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster)
{
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"sql_identifier\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_sql_identifier.txt");

	if (check_for_data_type_usage(cluster, "information_schema.sql_identifier",
								  output_path))
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"sql_identifier\" data type in user tables.\n"
				 "The on-disk format for this data type has changed, so this\n"
				 "cluster cannot currently be upgraded. You can\n"
				 "drop the problem columns and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
/*
 * report_extension_updates()
 *	Report extensions that should be updated.
 */
void
report_extension_updates(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "update_extensions.sql";

	prep_status("Checking for extension updates");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_name;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find extensions needing updates */
		res = executeQueryOrDie(conn,
								"SELECT name "
								"FROM pg_available_extensions "
								"WHERE installed_version != default_version"
			);

		ntups = PQntuples(res);
		i_name = PQfnumber(res, "name");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				PQExpBufferData connectbuf;

				initPQExpBuffer(&connectbuf);
				appendPsqlMetaConnect(&connectbuf, active_db->db_name);
				fputs(connectbuf.data, script);
				termPQExpBuffer(&connectbuf);
				db_used = true;
			}
			fprintf(script, "ALTER EXTENSION %s UPDATE;\n",
					quote_identifier(PQgetvalue(res, rowno, i_name)));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_REPORT, "notice");
		pg_log(PG_REPORT, "\n"
			   "Your installation contains extensions that should be updated\n"
			   "with the ALTER EXTENSION command. The file\n"
			   "    %s\n"
			   "when executed by psql by the database superuser will update\n"
			   "these extensions.\n\n",
			   output_path);
	}
	else
		check_ok();
}