-- Setup for the hash-index regression tests: create one heap table per
-- supported key type and bulk-load each from the shared hash.data fixture.
4 -- directory paths are passed to us in environment variables
5 \getenv abs_srcdir PG_ABS_SRCDIR
-- NOTE(review): the column lists of these CREATE TABLE statements fall
-- outside this excerpt; the queries below reference (seqno, random)
-- columns -- confirm against the full file.
6 CREATE TABLE hash_i4_heap (
10 CREATE TABLE hash_name_heap (
14 CREATE TABLE hash_txt_heap (
18 CREATE TABLE hash_f8_heap (
-- All four tables are loaded from the same data file; each table's
-- column types determine how the values are parsed.
22 \set filename :abs_srcdir '/data/hash.data'
23 COPY hash_i4_heap FROM :'filename';
24 COPY hash_name_heap FROM :'filename';
25 COPY hash_txt_heap FROM :'filename';
26 COPY hash_f8_heap FROM :'filename';
-- The hashovfl.data stress test is disabled (commented out below); this
-- comment block explains what it would exercise.
27 -- the data in this file has a lot of duplicates in the index key
28 -- fields, leading to long bucket chains and lots of table expansion.
29 -- this is therefore a stress test of the bucket overflow code (unlike
30 -- the data in hash.data, which has unique index keys).
32 -- \set filename :abs_srcdir '/data/hashovfl.data'
33 -- COPY hash_ovfl_heap FROM :'filename';
-- Refresh planner statistics after the bulk loads.
35 ANALYZE hash_name_heap;
36 ANALYZE hash_txt_heap;
-- Build a hash index over each table's key column, one per opclass.
38 CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
39 CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
40 CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
-- NOTE(review): no terminating semicolon here; this statement's
-- continuation (presumably a WITH clause) is outside this excerpt.
41 CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops)
44 -- Also try building functional, expressional, and partial indexes on
45 -- tables that already contain data.
47 create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
48 create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
49 create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
-- Equality probes against each hash index.  Each "grep" comment records
-- how the expected match count was derived from the data file; some probe
-- values exist in hash.data and some do not, so both the hit and miss
-- paths of an index scan are exercised.
52 -- grep 843938989 hash.data
54 SELECT * FROM hash_i4_heap
55 WHERE hash_i4_heap.random = 843938989;
63 -- grep 66766766 hash.data
65 SELECT * FROM hash_i4_heap
66 WHERE hash_i4_heap.random = 66766766;
73 -- grep 1505703298 hash.data
75 SELECT * FROM hash_name_heap
76 WHERE hash_name_heap.random = '1505703298'::name;
84 -- grep 7777777 hash.data
86 SELECT * FROM hash_name_heap
87 WHERE hash_name_heap.random = '7777777'::name;
94 -- grep 1351610853 hash.data
96 SELECT * FROM hash_txt_heap
97 WHERE hash_txt_heap.random = '1351610853'::text;
105 -- grep 111111112222222233333333 hash.data
107 SELECT * FROM hash_txt_heap
108 WHERE hash_txt_heap.random = '111111112222222233333333'::text;
115 -- grep 444705537 hash.data
117 SELECT * FROM hash_f8_heap
118 WHERE hash_f8_heap.random = '444705537'::float8;
126 -- grep 88888888 hash.data
128 SELECT * FROM hash_f8_heap
129 WHERE hash_f8_heap.random = '88888888'::float8;
-- Disabled count checks belonging to the commented-out hashovfl stress
-- test earlier in the file.
136 -- grep '^90[^0-9]' hashovfl.data
138 -- SELECT count(*) AS i988 FROM hash_ovfl_heap
142 -- grep '^1000[^0-9]' hashovfl.data
144 -- SELECT count(*) AS i0 FROM hash_ovfl_heap
-- UPDATE-then-probe round trips on hash_i4_heap.  NOTE(review): the
-- UPDATE statements and some FROM clauses are elided from this excerpt;
-- only fragments are visible here.
151 WHERE hash_i4_heap.seqno = 1492;
152 SELECT h.seqno AS i1492, h.random AS i1
162 WHERE hash_i4_heap.random = 1492795354;
163 SELECT h.seqno AS i20000
165 WHERE h.random = 1492795354;
-- Replace one hash_name_heap row, then verify the hash index finds the
-- new key value.
171 UPDATE hash_name_heap
172 SET random = '0123456789abcdef'::name
173 WHERE hash_name_heap.seqno = 6543;
174 SELECT h.seqno AS i6543, h.random AS c0_to_f
175 FROM hash_name_heap h
176 WHERE h.random = '0123456789abcdef'::name;
-- (captured expected output: single matching row)
178 -------+------------------
179   6543 | 0123456789abcdef
-- NOTE(review): this UPDATE's SET clause is on a line elided from this
-- excerpt.
182 UPDATE hash_name_heap
184 WHERE hash_name_heap.random = '76652222'::name;
186 -- this is the row we just replaced; index scan should return zero rows
188 SELECT h.seqno AS emptyset
189 FROM hash_name_heap h
190 WHERE h.random = '76652222'::name;
-- Same UPDATE-then-probe pattern for the text heap.  NOTE(review): the
-- leading UPDATE/SELECT/FROM lines of several statements here are elided
-- from this excerpt.
196 SET random = '0123456789abcdefghijklmnop'::text
197 WHERE hash_txt_heap.seqno = 4002;
198 SELECT h.seqno AS i4002, h.random AS c0_to_p
200 WHERE h.random = '0123456789abcdefghijklmnop'::text;
-- (captured expected output: single matching row)
202 -------+----------------------------
203   4002 | 0123456789abcdefghijklmnop
208 WHERE hash_txt_heap.random = '959363399'::text;
209 SELECT h.seqno AS t20000
211 WHERE h.random = '959363399'::text;
-- And again for the float8 heap.
218 SET random = '-1234.1234'::float8
219 WHERE hash_f8_heap.seqno = 8906;
220 SELECT h.seqno AS i8096, h.random AS f1234_1234
222 WHERE h.random = '-1234.1234'::float8;
230 WHERE hash_f8_heap.random = '488912369'::float8;
231 SELECT h.seqno AS f20000
233 WHERE h.random = '488912369'::float8;
-- Remainder of the disabled hashovfl stress test.
239 -- UPDATE hash_ovfl_heap
242 -- this vacuums the index as well
243 -- VACUUM hash_ovfl_heap;
244 -- SELECT count(*) AS i0 FROM hash_ovfl_heap
246 -- SELECT count(*) AS i988 FROM hash_ovfl_heap
249 -- Cause some overflow inserts and splits.
251 CREATE TABLE hash_split_heap (keycol INT);
-- Load duplicate keys before and after index creation so both the build
-- path and the insert path must create overflow pages / split buckets.
252 INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 500) a;
253 CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol);
254 INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 5000) a;
255 -- Let's do a backward scan.
-- Disable the competing plan types so the cursor must use the hash index.
257 SET enable_seqscan = OFF;
258 SET enable_bitmapscan = OFF;
-- NOTE(review): DECLARE CURSOR requires a transaction block; the BEGIN
-- (and matching END) are presumably on lines elided from this excerpt.
259 DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = 1;
260 MOVE FORWARD ALL FROM c;
261 MOVE BACKWARD 10000 FROM c;
262 MOVE BACKWARD ALL FROM c;
265 -- DELETE, INSERT, VACUUM.
266 DELETE FROM hash_split_heap WHERE keycol = 1;
267 INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 25000) a;
268 VACUUM hash_split_heap;
269 -- Rebuild the index using a different fillfactor
270 ALTER INDEX hash_split_index SET (fillfactor = 10);
271 REINDEX INDEX hash_split_index;
273 DROP TABLE hash_split_heap;
274 -- Testcases for removing overflow pages.
275 CREATE TABLE hash_cleanup_heap(keycol INT);
276 CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
-- Scenario 1: squeeze a bucket whose overflow pages hold only dead tuples.
277 -- Insert tuples to both the primary bucket page and overflow pages.
278 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
279 -- Fill overflow pages by "dead" tuples.
-- NOTE(review): the dead tuples are presumably produced by a BEGIN/
-- ROLLBACK around this insert on lines elided from this excerpt.
281 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
283 -- Checkpoint will ensure that all hash buffers are cleaned before we try
284 -- to remove overflow pages.
286 -- This will squeeze the bucket and remove overflow pages.
287 VACUUM hash_cleanup_heap;
288 TRUNCATE hash_cleanup_heap;
-- Scenario 2: free intermediate overflow pages by moving live tuples back
-- into a non-full primary bucket page.
289 -- Insert a few tuples so that the primary bucket page doesn't get full and
290 -- tuples can be moved to it.
291 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
292 -- Fill overflow pages by "dead" tuples.
294 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
296 -- And insert some tuples again. During squeeze operation, these will be moved
297 -- to the primary bucket allowing to test freeing intermediate overflow pages.
298 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
300 VACUUM hash_cleanup_heap;
301 TRUNCATE hash_cleanup_heap;
-- Scenario 3: free an overflow page from which no tuple needs to be moved.
302 -- Insert tuples to both the primary bucket page and overflow pages.
303 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
304 -- Fill overflow pages by "dead" tuples.
306 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
308 -- And insert some tuples again. During squeeze operation, these will be moved
309 -- to other overflow pages and also allow overflow pages filled by dead tuples
310 -- to be freed. Note the main purpose of this test is to test the case where
311 -- we don't need to move any tuple from the overflow page being freed.
312 INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
314 VACUUM hash_cleanup_heap;
316 DROP TABLE hash_cleanup_heap;
-- Hash index on a temporary table (uses local, non-WAL-logged buffers).
317 -- Index on temp table.
318 CREATE TEMP TABLE hash_temp_heap (x int, y int);
319 INSERT INTO hash_temp_heap VALUES (1,1);
320 CREATE INDEX hash_idx ON hash_temp_heap USING hash (x);
321 DROP TABLE hash_temp_heap CASCADE;
-- Hash index on a float4 column.
323 CREATE TABLE hash_heap_float4 (x float4, y int);
324 INSERT INTO hash_heap_float4 VALUES (1.1,1);
325 CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x);
326 DROP TABLE hash_heap_float4 CASCADE;
-- Both out-of-range fillfactor values must be rejected; the ERROR/DETAIL
-- lines below are the captured expected psql output.
327 -- Test out-of-range fillfactor values
-- NOTE(review): the "WITH (fillfactor=9);" continuation of this statement
-- is presumably on a line elided from this excerpt, matching the
-- "value 9" error that follows.
328 CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops)
330 ERROR: value 9 out of bounds for option "fillfactor"
331 DETAIL: Valid values are between "10" and "100".
332 CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops)
333 WITH (fillfactor=101);
334 ERROR: value 101 out of bounds for option "fillfactor"
335 DETAIL: Valid values are between "10" and "100".