6 -- (any resemblance to real life is purely coincidental)
-- Bulk-load the standard regression-test tables from the flat data files
-- shipped with the suite.  @abs_srcdir@ and @abs_builddir@ are placeholders
-- substituted by pg_regress before the script is executed.
8 COPY aggtest FROM '@abs_srcdir@/data/agg.data';
10 COPY onek FROM '@abs_srcdir@/data/onek.data';
-- Round-trip onek: write it out to the results directory, then read that
-- same file back in, exercising both COPY TO and COPY FROM on one data set.
12 COPY onek TO '@abs_builddir@/results/onek.data';
16 COPY onek FROM '@abs_builddir@/results/onek.data';
18 COPY tenk1 FROM '@abs_srcdir@/data/tenk.data';
20 COPY slow_emp4000 FROM '@abs_srcdir@/data/rect.data';
22 COPY person FROM '@abs_srcdir@/data/person.data';
24 COPY emp FROM '@abs_srcdir@/data/emp.data';
26 COPY student FROM '@abs_srcdir@/data/student.data';
28 COPY stud_emp FROM '@abs_srcdir@/data/stud_emp.data';
30 COPY road FROM '@abs_srcdir@/data/streets.data';
32 COPY real_city FROM '@abs_srcdir@/data/real_city.data';
-- the four hash_* heaps all load identical rows from the same file
34 COPY hash_i4_heap FROM '@abs_srcdir@/data/hash.data';
36 COPY hash_name_heap FROM '@abs_srcdir@/data/hash.data';
38 COPY hash_txt_heap FROM '@abs_srcdir@/data/hash.data';
40 COPY hash_f8_heap FROM '@abs_srcdir@/data/hash.data';
42 COPY test_tsvector FROM '@abs_srcdir@/data/tsearch.data';
44 COPY testjsonb FROM '@abs_srcdir@/data/jsonb.data';
46 -- the data in this file has a lot of duplicates in the index key
47 -- fields, leading to long bucket chains and lots of table expansion.
48 -- this is therefore a stress test of the bucket overflow code (unlike
49 -- the data in hash.data, which has unique index keys).
-- NOTE(review): intentionally left disabled in the original suite; kept
-- as documentation of the hashovfl stress data set.
51 -- COPY hash_ovfl_heap FROM '@abs_srcdir@/data/hashovfl.data';
-- btree test heaps; bt_name_heap and bt_f8_heap deliberately reuse hash.data
53 COPY bt_i4_heap FROM '@abs_srcdir@/data/desc.data';
55 COPY bt_name_heap FROM '@abs_srcdir@/data/hash.data';
57 COPY bt_txt_heap FROM '@abs_srcdir@/data/desc.data';
59 COPY bt_f8_heap FROM '@abs_srcdir@/data/hash.data';
-- both array test tables load the same array.data file
61 COPY array_op_test FROM '@abs_srcdir@/data/array.data';
63 COPY array_index_op_test FROM '@abs_srcdir@/data/array.data';
65 -- analyze all the data we just loaded, to ensure plan consistency
-- (refreshing planner statistics keeps query plans in later regression
-- tests stable across runs)
79 ANALYZE hash_name_heap;
80 ANALYZE hash_txt_heap;
82 ANALYZE test_tsvector;
87 ANALYZE array_op_test;
88 ANALYZE array_index_op_test;
90 --- test copying in CSV mode with various styles
91 --- of embedded line ending characters
93 create temp table copytest (
-- one row per line-ending style: DOS CRLF, Unix LF, Mac CR, plus a row
-- mixing literal backslashes with CR/LF to exercise CSV escaping
98 insert into copytest values('DOS',E'abc\r\ndef',1);
99 insert into copytest values('Unix',E'abc\ndef',2);
100 insert into copytest values('Mac',E'abc\rdef',3);
101 insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4);
-- dump to CSV, reload into a structural clone, and compare
103 copy copytest to '@abs_builddir@/results/copytest.csv' csv;
105 create temp table copytest2 (like copytest);
107 copy copytest2 from '@abs_builddir@/results/copytest.csv' csv;
-- EXCEPT returns no rows iff the round trip preserved every value
109 select * from copytest except select * from copytest2;
113 --- same test but with an escape char different from quote char
-- re-dump using single-quote as the CSV quote and backslash as the escape
115 copy copytest to '@abs_builddir@/results/copytest.csv' csv quote '''' escape E'\\';
117 copy copytest2 from '@abs_builddir@/results/copytest.csv' csv quote '''' escape E'\\';
-- the round-trip comparison should again come back empty
119 select * from copytest except select * from copytest2;
122 -- test header line feature
-- quoted identifiers below deliberately embed a comma and a double quote,
-- so the generated CSV header line itself needs quoting/escaping
124 create temp table copytest3 (
126 	"col with , comma" text,
127 	"col with "" quote" int);
-- with HEADER, COPY FROM discards the first input line, so the junk line
-- below must be skipped rather than parsed
129 copy copytest3 from stdin csv header;
130 this is just a line full of junk that would error out if parsed
135 copy copytest3 to stdout csv header;
137 -- test copy from with a partitioned table
138 create table parted_copytest (
142 ) partition by list (b);
-- the partitions deliberately declare their columns in orders different
-- from the parent's, so tuple routing must remap attribute numbers
144 create table parted_copytest_a1 (c text, b int, a int);
145 create table parted_copytest_a2 (a int, c text, b int);
147 alter table parted_copytest attach partition parted_copytest_a1 for values in(1);
148 alter table parted_copytest attach partition parted_copytest_a2 for values in(2);
150 -- We must insert enough rows to trigger multi-inserts. These are only
151 -- enabled adaptively when there are few enough partition changes.
152 insert into parted_copytest select x,1,'One' from generate_series(1,1000) x;
153 insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
154 insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
-- dump everything in a deterministic order, then reload through the parent
156 copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
158 truncate parted_copytest;
160 copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
162 -- Ensure COPY FREEZE errors for partitioned tables.
164 truncate parted_copytest;
-- expected to fail: FREEZE is rejected when the target is partitioned
165 copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv' (freeze);
-- check how the loaded rows were distributed across the partitions
168 select tableoid::regclass,count(*),sum(a) from parted_copytest
169 group by tableoid order by tableoid::regclass::name;
171 truncate parted_copytest;
173 -- create before insert row trigger on parted_copytest_a2
174 create function part_ins_func() returns trigger language plpgsql as $$
180 create trigger part_ins_trig
181 before insert on parted_copytest_a2
183 execute procedure part_ins_func();
-- reload via COPY: rows routed into parted_copytest_a2 now fire the
-- per-row trigger during the bulk load
185 copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
187 select tableoid::regclass,count(*),sum(a) from parted_copytest
188 group by tableoid order by tableoid::regclass::name;
-- rebuild with an index so the next COPY must also maintain index entries
190 truncate table parted_copytest;
191 create index on parted_copytest (b);
192 drop trigger part_ins_trig on parted_copytest_a2;
194 copy parted_copytest from stdin;
199 -- Ensure index entries were properly added during the copy.
200 select * from parted_copytest where b = 1;
201 select * from parted_copytest where b = 2;
203 drop table parted_copytest;
206 -- Progress reporting for COPY
208 create table tab_progress_reporting (
216 -- Add a trigger to catch and print the contents of the catalog view
217 -- pg_stat_progress_copy during data insertion. This allows us to test
218 -- the validation of some progress reports for COPY FROM where the trigger
-- The trigger function snapshots this backend's row of pg_stat_progress_copy
-- and emits it as a single jsonb value via RAISE INFO.
220 create function notice_after_tab_progress_reporting() returns trigger AS
222 declare report record;
224 -- The fields ignored here are the ones that may not remain
225 -- consistent across multiple runs. The sizes reported may differ
226 -- across platforms, so just check if these are strictly positive.
227 with progress_data as (
229 relid::regclass::text as relname,
-- byte counters are reduced to booleans for run-to-run stability
232 bytes_processed > 0 as has_bytes_processed,
233 bytes_total > 0 as has_bytes_total,
236 from pg_stat_progress_copy
237 where pid = pg_backend_pid())
-- collapse the whole filtered row into one jsonb value for stable output
238 select into report (to_jsonb(r)) as value
239 from progress_data r;
241 raise info 'progress: %', report.value::text;
246 create trigger check_after_tab_progress_reporting
247 after insert on tab_progress_reporting
249 execute function notice_after_tab_progress_reporting();
251 -- Generate COPY FROM report with PIPE.
252 copy tab_progress_reporting from stdin;
253 sharon	25	(15,12)	1000	sam
254 sam	30	(10,5)	2000	bill
255 bill	20	(11,10)	1000	sharon
258 -- Generate COPY FROM report with FILE, with some excluded tuples.
259 truncate tab_progress_reporting;
260 copy tab_progress_reporting from '@abs_srcdir@/data/emp.data'
261 where (salary < 2000);
-- tidy up: remove the trigger, the function, and the table
263 drop trigger check_after_tab_progress_reporting on tab_progress_reporting;
264 drop function notice_after_tab_progress_reporting();
265 drop table tab_progress_reporting;