# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library.  The
# focus of the tests in this file is to verify that the pager
# optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# The page-count expectations below assume the default pager behavior,
# so skip the whole file when the pager pragmas are unavailable or when
# the secure_delete or direct_read options are enabled.
#
ifcapable {!pager_pragmas||secure_delete||direct_read} {
  finish_test
  return
}
# Run the SQL statement supplied by the argument and return
# the results.  Prepend three integers to the beginning of the
# result which are:
#
#     (1)  The number of page reads from the database
#     (2)  The number of page writes to the database
#     (3)  The number of page writes to the journal
#
proc pagercount_sql {sql {db db}} {
  global sqlite3_pager_readdb_count
  global sqlite3_pager_writedb_count
  global sqlite3_pager_writej_count
  global sqlite3_pager_pgfree_count   ;# not part of the returned counts
  set sqlite3_pager_readdb_count 0
  set sqlite3_pager_writedb_count 0
  set sqlite3_pager_writej_count 0
  set r [$db eval $sql]
  set cnt [list $sqlite3_pager_readdb_count \
                $sqlite3_pager_writedb_count \
                $sqlite3_pager_writej_count ]
  return [concat $cnt $r]
}
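
# The commented-out call below is only a usage sketch of pagercount_sql and
# is not one of the numbered tests; it is left commented out so that it does
# not disturb the page counters that the real tests measure.  If run, it
# would return the three counters followed by the query results, e.g.
# something of the form "1 0 0 0" (the counts shown here are assumed, not
# taken from an actual run).
#
#   pagercount_sql {SELECT count(*) FROM sqlite_master}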

# Set up the test database.
#
do_test pageropt-1.1 {
  sqlite3_soft_heap_limit 0
  execsql {
    PRAGMA auto_vacuum = OFF;
    PRAGMA page_size = 1024;
  }
  pagercount_sql {
    CREATE TABLE t1(x);
  }
} {0 2 0}
do_test pageropt-1.2 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(5000));
  }
} {0 7 2}

# Verify that values remain in cache for subsequent reads.
# We should not have to go back to disk.
#
do_test pageropt-1.3 {
  pagercount_sql {
    SELECT length(x) FROM t1
  }
} {0 0 0 5000}

# If another thread reads the database, the original cache
# remains valid.
#
sqlite3 db2 test.db
set blobcontent [db2 one {SELECT hex(x) FROM t1}]
do_test pageropt-1.4 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# But if the other thread modifies the database, then the cache
# must refill.  (Under the "mmap" permutation most pages are read
# through the memory map and are not counted, so only a single counted
# read is expected there; otherwise all 6 pages must be read again.)
#
ifcapable mmap {
  set x [expr {[permutation]=="mmap" ? 1 : 6}]
} else {
  set x 6
}
do_test pageropt-1.5 {
  db2 eval {CREATE TABLE t2(y)}
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list $x 0 0 $blobcontent]
do_test pageropt-1.6 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# Verify that the last page of an overflow chain is not read from
# disk when deleting a row.  The one row of t1(x) has four pages
# of overflow, so deleting that row from t1 should involve reading
# the sqlite_master table (1 page), the root page of t1 (1 page), and
# the first three of the four overflow pages, for a total of 5 pages.
#
# Pages written are page 1 (for the freelist pointer), the root page
# of the table, and one of the overflow pages, because it becomes
# the trunk of the freelist.  Total: 3 pages.
#
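# A small arithmetic sketch of the accounting described above.  The
# variable names below are illustrative only; they are not part of the
# original test sequence and are not used by the test that follows:
set pageropt_sketch_reads  [expr {1 + 1 + 3}]  ;# sqlite_master + t1 root + 3 overflow = 5
set pageropt_sketch_writes [expr {1 + 1 + 1}]  ;# page 1 + t1 root + new freelist trunk = 3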
do_test pageropt-2.1 {
  db close
  db2 close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1 WHERE rowid=1
  }
} {5 3 3}

# When pulling pages off of the freelist, there is no reason
# to actually bring in the old content.
#
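# Sketch of the expectation (illustrative only, assuming a cold cache):
# only page 1, the t1 root, and the freelist trunk should need to be
# read; the freelist page handed back to t1 is reused without reading
# its old content.
set pageropt_sketch_freelist_reads [expr {1 + 1 + 1}]  ;# 3 expected page reads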
do_test pageropt-2.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {3 4 3}
do_test pageropt-2.3 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 4 3}

# Note the optimization: when pulling the very last page off of the
# freelist, we do not read the content of that page.
#
do_test pageropt-2.4 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 5 3}

# Appending a large quantity of data does not involve writing much
# to the journal file.
#
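# Why the journal stays small (a sketch of the reasoning, not part of the
# original test): only pages that already existed in the database file
# before the transaction need to be journalled; pages newly allocated at
# the end of the file have no prior content to preserve for rollback.
set pageropt_sketch_journal_writes 2  ;# assumed: page 1 + the root of the target table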
do_test pageropt-3.1 {
  pagercount_sql {
    INSERT INTO t2 SELECT * FROM t1;
  }
} {1 7 2}

# Once again, we do not need to read the last page of an overflow chain
# while deleting.
#
do_test pageropt-3.2 {
  pagercount_sql {
    DROP TABLE t2;
  }
} {0 2 3}
do_test pageropt-3.3 {
  pagercount_sql {
    DELETE FROM t1;
  }
} {0 3 3}

# There are now 11 pages on the freelist.  Move them all into an
# overflow chain by inserting a single large record.  Starting from
# a cold cache, only page 1, the root page of table t1, and the trunk
# of the freelist need to be read (3 pages).  And only those three
# pages need to be journalled.  But 13 pages need to be written:
# page 1, the root page of table t1, and an 11-page overflow chain.
#
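# Arithmetic sketch of the paragraph above (the variable names are
# illustrative only and are not used by the test):
set pageropt_sketch_cold_reads  [expr {1 + 1 + 1}]   ;# page 1 + t1 root + freelist trunk = 3
set pageropt_sketch_cold_writes [expr {1 + 1 + 11}]  ;# page 1 + t1 root + 11 overflow pages = 13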
do_test pageropt-4.1 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(11300))
  }
} {3 13 3}

# Now delete that big entry, starting from a cold cache and an
# empty freelist.  The first 10 pages of the 11-page overflow chain
# have to be read, together with page 1 and the root page of t1:
# 12 reads in total.  But only page 1, the t1 root, and the trunk of
# the freelist need to be journalled and written back.
#
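# The same accounting, as a sketch (illustrative variable names only):
set pageropt_sketch_delete_reads  [expr {10 + 1 + 1}]  ;# 10 overflow pages + page 1 + t1 root = 12
set pageropt_sketch_delete_writes [expr {1 + 1 + 1}]   ;# page 1 + t1 root + new freelist trunk = 3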
do_test pageropt-4.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1
  }
} {12 3 3}

sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
finish_test