src/pcache.c
1 /*
2 ** 2008 August 05
3 **
4 ** The author disclaims copyright to this source code. In place of
5 ** a legal notice, here is a blessing:
6 **
7 ** May you do good and not evil.
8 ** May you find forgiveness for yourself and forgive others.
9 ** May you share freely, never taking more than you give.
11 *************************************************************************
12 ** This file implements the page cache.
14 #include "sqliteInt.h"
17 ** A complete page cache is an instance of this structure. Every
18 ** entry in the cache holds a single page of the database file. The
19 ** btree layer only operates on the cached copy of the database pages.
21 ** A page cache entry is "clean" if it exactly matches what is currently
22 ** on disk. A page is "dirty" if it has been modified and needs to be
23 ** persisted to disk.
25 ** pDirty, pDirtyTail, pSynced:
26 ** All dirty pages are linked into the doubly linked list using
27 ** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order
28 ** such that p was added to the list more recently than p->pDirtyNext.
29 ** PCache.pDirty points to the first (newest) element in the list and
30 ** pDirtyTail to the last (oldest).
32 ** The PCache.pSynced variable is used to optimize searching for a dirty
33 ** page to eject from the cache mid-transaction. It is better to eject
34 ** a page that does not require a journal sync than one that does.
35 ** Therefore, pSynced is maintained so that it *almost* always points
36 ** to either the oldest page in the pDirty/pDirtyTail list that has a
37 ** clear PGHDR_NEED_SYNC flag or to a page that is older than this one
38 ** (so that the right page to eject can be found by following pDirtyPrev
39 ** pointers).
41 struct PCache {
42 PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */
43 PgHdr *pSynced; /* Last synced page in dirty page list */
44 i64 nRefSum; /* Sum of ref counts over all pages */
45 int szCache; /* Configured cache size */
46 int szSpill; /* Size before spilling occurs */
47 int szPage; /* Size of every page in this cache */
48 int szExtra; /* Size of extra space for each page */
49 u8 bPurgeable; /* True if pages are on backing store */
50 u8 eCreate; /* eCreate value for xFetch() */
51 int (*xStress)(void*,PgHdr*); /* Call to try to make a page clean */
52 void *pStress; /* Argument to xStress */
53 sqlite3_pcache *pCache; /* Pluggable cache module */
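/* [Editor's illustrative sketch, not part of the original pcache.c.]
** It shows how the dirty list documented above can be walked from the
** oldest entry (pDirtyTail) toward newer entries (via pDirtyPrev) to find
** an unreferenced page that does not need a journal sync first, which is
** essentially the search that sqlite3PcacheFetchStress() begins at
** pSynced.  Wrapped in #if 0 so it stays out of the build. */
#if 0
static PgHdr *examplePickSpillCandidate(PCache *p){
  PgHdr *pPg;
  for(pPg=p->pDirtyTail; pPg; pPg=pPg->pDirtyPrev){
    if( pPg->nRef==0 && (pPg->flags & PGHDR_NEED_SYNC)==0 ) return pPg;
  }
  return 0;  /* Every dirty page is pinned or would require a sync first */
}
#endif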
56 /********************************** Test and Debug Logic **********************/
58 ** Debug tracing macros. Enable by changing the "0" to "1" and
59 ** recompiling.
61 ** When sqlite3PcacheTrace is 1, single line trace messages are issued.
62 ** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
63 ** is displayed for many operations, resulting in a lot of output.
65 #if defined(SQLITE_DEBUG) && 0
66 int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */
67 int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */
68 # define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
69 static void pcachePageTrace(int i, sqlite3_pcache_page *pLower){
70 PgHdr *pPg;
71 unsigned char *a;
72 int j;
73 if( pLower==0 ){
74 printf("%3d: NULL\n", i);
75 }else{
76 pPg = (PgHdr*)pLower->pExtra;
77 printf("%3d: nRef %2lld flgs %02x data ", i, pPg->nRef, pPg->flags);
78 a = (unsigned char *)pLower->pBuf;
79 for(j=0; j<12; j++) printf("%02x", a[j]);
80 printf(" ptr %p\n", pPg);
83 static void pcacheDump(PCache *pCache){
84 int N;
85 int i;
86 sqlite3_pcache_page *pLower;
88 if( sqlite3PcacheTrace<2 ) return;
89 if( pCache->pCache==0 ) return;
90 N = sqlite3PcachePagecount(pCache);
91 if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
92 for(i=1; i<=N; i++){
93 pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
94 pcachePageTrace(i, pLower);
95 if( pLower && ((PgHdr*)pLower)->pPage==0 ){
96 sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
100 #else
101 # define pcacheTrace(X)
102 # define pcachePageTrace(PGNO, X)
103 # define pcacheDump(X)
104 #endif
107 ** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
108 ** This routine runs inside of assert() statements only.
110 #ifdef SQLITE_DEBUG
111 static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
112 PgHdr *p;
113 for(p=pCache->pDirty; p; p=p->pDirtyNext){
114 if( p==pPg ) return 1;
116 return 0;
118 #endif
121 ** Check invariants on a PgHdr entry. Return true if everything is OK.
122 ** Return false if any invariant is violated.
124 ** This routine is for use inside of assert() statements only. For
125 ** example:
127 ** assert( sqlite3PcachePageSanity(pPg) );
129 #ifdef SQLITE_DEBUG
130 int sqlite3PcachePageSanity(PgHdr *pPg){
131 PCache *pCache;
132 assert( pPg!=0 );
133 assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */
134 pCache = pPg->pCache;
135 assert( pCache!=0 ); /* Every page has an associated PCache */
136 if( pPg->flags & PGHDR_CLEAN ){
137 assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
138 assert( !pageOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirty list */
139 }else{
140 assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */
141 assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );
142 assert( pPg->pDirtyPrev==0 || pPg->pDirtyPrev->pDirtyNext==pPg );
143 assert( pPg->pDirtyPrev!=0 || pCache->pDirty==pPg );
144 assert( pageOnDirtyList(pCache, pPg) );
146 /* WRITEABLE pages must also be DIRTY */
147 if( pPg->flags & PGHDR_WRITEABLE ){
148 assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */
150 /* NEED_SYNC can be set independently of WRITEABLE. This can happen,
151 ** for example, when using the sqlite3PagerDontWrite() optimization:
152 ** (1) Page X is journalled, and gets WRITEABLE and NEED_SYNC.
153 ** (2) Page X moved to freelist, WRITEABLE is cleared
154 ** (3) Page X reused, WRITEABLE is set again
155 ** If NEED_SYNC had been cleared in step 2, then it would not be reset
156 ** in step 3, and the page might be written into the database without first
157 ** syncing the rollback journal, which might cause corruption on a power
158 ** loss.
160 ** Another example is when the database page size is smaller than the
161 ** disk sector size. When any page of a sector is journalled, all pages
162 ** in that sector are marked NEED_SYNC even if they are still CLEAN, just
163 ** in case they are later modified, since all pages in the same sector
164 ** must be journalled and synced before any of those pages can be safely
165 ** written.
167 return 1;
169 #endif /* SQLITE_DEBUG */
172 /********************************** Linked List Management ********************/
174 /* Allowed values for second argument to pcacheManageDirtyList() */
175 #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */
176 #define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */
177 #define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */
180 ** Manage pPage's participation in the dirty list. Bits of the addRemove
181 ** argument determine what operation to do. The 0x01 bit means first
182 ** remove pPage from the dirty list. The 0x02 bit means add pPage back to
183 ** the dirty list. Doing both moves pPage to the front of the dirty list.
185 static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
186 PCache *p = pPage->pCache;
188 pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
189 addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
190 pPage->pgno));
191 if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
192 assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
193 assert( pPage->pDirtyPrev || pPage==p->pDirty );
195 /* Update the PCache.pSynced variable if necessary. */
196 if( p->pSynced==pPage ){
197 p->pSynced = pPage->pDirtyPrev;
200 if( pPage->pDirtyNext ){
201 pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
202 }else{
203 assert( pPage==p->pDirtyTail );
204 p->pDirtyTail = pPage->pDirtyPrev;
206 if( pPage->pDirtyPrev ){
207 pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
208 }else{
209 /* If there are now no dirty pages in the cache, set eCreate to 2.
210 ** This is an optimization that allows sqlite3PcacheFetch() to skip
211 ** searching for a dirty page to eject from the cache when it might
212 ** otherwise have to. */
213 assert( pPage==p->pDirty );
214 p->pDirty = pPage->pDirtyNext;
215 assert( p->bPurgeable || p->eCreate==2 );
216 if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/
217 assert( p->bPurgeable==0 || p->eCreate==1 );
218 p->eCreate = 2;
222 if( addRemove & PCACHE_DIRTYLIST_ADD ){
223 pPage->pDirtyPrev = 0;
224 pPage->pDirtyNext = p->pDirty;
225 if( pPage->pDirtyNext ){
226 assert( pPage->pDirtyNext->pDirtyPrev==0 );
227 pPage->pDirtyNext->pDirtyPrev = pPage;
228 }else{
229 p->pDirtyTail = pPage;
230 if( p->bPurgeable ){
231 assert( p->eCreate==2 );
232 p->eCreate = 1;
235 p->pDirty = pPage;
237 /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
238 ** pSynced to point to it. Checking the NEED_SYNC flag is an
239 ** optimization, as if pSynced points to a page with the NEED_SYNC
240 ** flag set sqlite3PcacheFetchStress() searches through all newer
241 ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */
242 if( !p->pSynced
243 && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/
245 p->pSynced = pPage;
248 pcacheDump(p);
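/* [Editor's illustrative sketch, not in the original file.]  The three
** PCACHE_DIRTYLIST_* values used above form a bit mask: FRONT (3) is just
** REMOVE (0x01) combined with ADD (0x02), which is why doing both moves
** pPage to the front of the dirty list.  Typical call sites, under #if 0
** so the sketch stays out of the build: */
#if 0
static void exampleDirtyListOps(PgHdr *pPg){
  assert( PCACHE_DIRTYLIST_FRONT==(PCACHE_DIRTYLIST_REMOVE|PCACHE_DIRTYLIST_ADD) );
  pcacheManageDirtyList(pPg, PCACHE_DIRTYLIST_ADD);    /* page just became dirty (cf. sqlite3PcacheMakeDirty) */
  pcacheManageDirtyList(pPg, PCACHE_DIRTYLIST_FRONT);  /* dirty page released (cf. sqlite3PcacheRelease) */
  pcacheManageDirtyList(pPg, PCACHE_DIRTYLIST_REMOVE); /* page made clean (cf. sqlite3PcacheMakeClean) */
}
#endif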
252 ** Wrapper around the pluggable caches xUnpin method. If the cache is
253 ** being used for an in-memory database, this function is a no-op.
255 static void pcacheUnpin(PgHdr *p){
256 if( p->pCache->bPurgeable ){
257 pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
258 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
259 pcacheDump(p->pCache);
264 ** Compute the number of pages of cache requested. p->szCache is the
265 ** cache size requested by the "PRAGMA cache_size" statement.
267 static int numberOfCachePages(PCache *p){
268 if( p->szCache>=0 ){
269 /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
270 ** suggested cache size is set to N. */
271 return p->szCache;
272 }else{
273 i64 n;
274 /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
275 ** number of cache pages is adjusted to be a number of pages that would
276 ** use approximately abs(N*1024) bytes of memory based on the current
277 ** page size. */
278 n = ((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
279 if( n>1000000000 ) n = 1000000000;
280 return (int)n;
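/* [Editor's worked example, not in the original file.]  With the common
** default "PRAGMA cache_size=-2000" (roughly 2 MB of cache) and an assumed
** 4096-byte page carrying, say, 136 bytes of per-page extra space, the
** formula above gives (-1024 * -2000)/(4096+136) = 2048000/4232 = 483
** pages.  A positive cache_size value is simply returned unchanged. */
#if 0
static int exampleCachePages(void){
  i64 szCache = -2000;               /* PRAGMA cache_size value */
  int szPage = 4096, szExtra = 136;  /* assumed sizes, for illustration only */
  i64 n = (-1024*szCache)/(szPage+szExtra);
  if( n>1000000000 ) n = 1000000000;
  return (int)n;                     /* 483 with the numbers above */
}
#endif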
284 /*************************************************** General Interfaces ******
286 ** Initialize and shutdown the page cache subsystem. Neither of these
287 ** functions is threadsafe.
289 int sqlite3PcacheInitialize(void){
290 if( sqlite3GlobalConfig.pcache2.xInit==0 ){
291 /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
292 ** built-in default page cache is used instead of the application defined
293 ** page cache. */
294 sqlite3PCacheSetDefault();
295 assert( sqlite3GlobalConfig.pcache2.xInit!=0 );
297 return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg);
299 void sqlite3PcacheShutdown(void){
300 if( sqlite3GlobalConfig.pcache2.xShutdown ){
301 /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
302 sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg);
307 ** Return the size in bytes of a PCache object.
309 int sqlite3PcacheSize(void){ return sizeof(PCache); }
312 ** Create a new PCache object. Storage space to hold the object
313 ** has already been allocated and is passed in as the p pointer.
314 ** The caller discovers how much space needs to be allocated by
315 ** calling sqlite3PcacheSize().
317 ** szExtra is some extra space allocated for each page. The first
318 ** 8 bytes of the extra space will be zeroed as the page is allocated,
319 ** but remaining content will be uninitialized. Though it is opaque
320 ** to this module, the extra space really ends up being the MemPage
321 ** structure in the pager.
323 int sqlite3PcacheOpen(
324 int szPage, /* Size of every page */
325 int szExtra, /* Extra space associated with each page */
326 int bPurgeable, /* True if pages are on backing store */
327 int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
328 void *pStress, /* Argument to xStress */
329 PCache *p /* Preallocated space for the PCache */
331 memset(p, 0, sizeof(PCache));
332 p->szPage = 1;
333 p->szExtra = szExtra;
334 assert( szExtra>=8 ); /* First 8 bytes will be zeroed */
335 p->bPurgeable = bPurgeable;
336 p->eCreate = 2;
337 p->xStress = xStress;
338 p->pStress = pStress;
339 p->szCache = 100;
340 p->szSpill = 1;
341 pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
342 return sqlite3PcacheSetPageSize(p, szPage);
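/* [Editor's illustrative sketch, not part of the original file.]  The
** intended calling pattern described in the header comment above: the
** caller (in SQLite, the pager) allocates sqlite3PcacheSize() bytes itself
** and hands that space to sqlite3PcacheOpen().  The names and the sizes
** below are hypothetical; kept inside #if 0 so it does not build. */
#if 0
static int exampleStress(void *pArg, PgHdr *pPg){
  /* A real callback would try to write pPg out and mark it clean */
  return SQLITE_OK;
}
static int exampleOpen(PCache **ppOut){
  PCache *p = (PCache*)sqlite3MallocZero( sqlite3PcacheSize() );
  int rc;
  if( p==0 ) return SQLITE_NOMEM_BKPT;
  rc = sqlite3PcacheOpen(4096, 8, 1, exampleStress, 0, p);
  if( rc!=SQLITE_OK ){ sqlite3_free(p); return rc; }
  *ppOut = p;
  return SQLITE_OK;
}
#endif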
346 ** Change the page size for the PCache object. The caller must ensure that there
347 ** are no outstanding page references when this function is called.
349 int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
350 assert( pCache->nRefSum==0 && pCache->pDirty==0 );
351 if( pCache->szPage ){
352 sqlite3_pcache *pNew;
353 pNew = sqlite3GlobalConfig.pcache2.xCreate(
354 szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
355 pCache->bPurgeable
357 if( pNew==0 ) return SQLITE_NOMEM_BKPT;
358 sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
359 if( pCache->pCache ){
360 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
362 pCache->pCache = pNew;
363 pCache->szPage = szPage;
364 pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
366 return SQLITE_OK;
370 ** Try to obtain a page from the cache.
372 ** This routine returns a pointer to an sqlite3_pcache_page object if
373 ** such an object is already in cache, or if a new one is created.
374 ** This routine returns a NULL pointer if the object was not in cache
375 ** and could not be created.
377 ** The createFlag should be 0 to check for existing pages and should
378 ** be 3 (not 1, but 3) to try to create a new page.
380 ** If the createFlag is 0, then NULL is always returned if the page
381 ** is not already in the cache. If createFlag is 1, then a new page
382 ** is created only if that can be done without spilling dirty pages
383 ** and without exceeding the cache size limit.
385 ** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
386 ** initialize the sqlite3_pcache_page object and convert it into a
387 ** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
388 ** routines are split this way for performance reasons. When separated
389 ** they can both (usually) operate without having to push values to
390 ** the stack on entry and pop them back off on exit, which saves a
391 ** lot of pushing and popping.
393 sqlite3_pcache_page *sqlite3PcacheFetch(
394 PCache *pCache, /* Obtain the page from this cache */
395 Pgno pgno, /* Page number to obtain */
396 int createFlag /* If true, create page if it does not exist already */
398 int eCreate;
399 sqlite3_pcache_page *pRes;
401 assert( pCache!=0 );
402 assert( pCache->pCache!=0 );
403 assert( createFlag==3 || createFlag==0 );
404 assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );
406 /* eCreate defines what to do if the page does not exist.
407 ** 0 Do not allocate a new page. (createFlag==0)
408 ** 1 Allocate a new page if doing so is inexpensive.
409 ** (createFlag==1 AND bPurgeable AND pDirty)
410 ** 2 Allocate a new page even if doing so is difficult.
411 ** (createFlag==1 AND !(bPurgeable AND pDirty))
413 eCreate = createFlag & pCache->eCreate;
414 assert( eCreate==0 || eCreate==1 || eCreate==2 );
415 assert( createFlag==0 || pCache->eCreate==eCreate );
416 assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
417 pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
418 pcacheTrace(("%p.FETCH %d%s (result: %p) ",pCache,pgno,
419 createFlag?" create":"",pRes));
420 pcachePageTrace(pgno, pRes);
421 return pRes;
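/* [Editor's illustrative sketch, not part of the original file.]  The
** three-step fetch protocol described above, roughly as the pager uses it:
** try the cheap fetch first, fall back to sqlite3PcacheFetchStress() if
** that fails, then convert the result with sqlite3PcacheFetchFinish().
** Wrapped in #if 0 so it stays out of the build. */
#if 0
static int exampleGetPage(PCache *pCache, Pgno pgno, PgHdr **ppPg){
  sqlite3_pcache_page *pBase;
  pBase = sqlite3PcacheFetch(pCache, pgno, 3);
  if( pBase==0 ){
    int rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
    if( pBase==0 ) return rc==SQLITE_OK ? SQLITE_NOMEM_BKPT : rc;
  }
  *ppPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
  return SQLITE_OK;
}
#endif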
425 ** If the sqlite3PcacheFetch() routine is unable to allocate a new
426 ** page because no clean pages are available for reuse and the cache
427 ** size limit has been reached, then this routine can be invoked to
428 ** try harder to allocate a page. This routine might invoke the stress
429 ** callback to spill dirty pages to the journal. It will then try to
430 ** allocate the new page and will only fail to allocate a new page on
431 ** an OOM error.
433 ** This routine should be invoked only after sqlite3PcacheFetch() fails.
435 int sqlite3PcacheFetchStress(
436 PCache *pCache, /* Obtain the page from this cache */
437 Pgno pgno, /* Page number to obtain */
438 sqlite3_pcache_page **ppPage /* Write result here */
440 PgHdr *pPg;
441 if( pCache->eCreate==2 ) return 0;
443 if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
444 /* Find a dirty page to write-out and recycle. First try to find a
445 ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
446 ** cleared), but if that is not possible settle for any other
447 ** unreferenced dirty page.
449 ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
450 ** flag is currently referenced, then the following may leave pSynced
451 ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
452 ** cleared). This is Ok, as pSynced is just an optimization. */
453 for(pPg=pCache->pSynced;
454 pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
455 pPg=pPg->pDirtyPrev
457 pCache->pSynced = pPg;
458 if( !pPg ){
459 for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
461 if( pPg ){
462 int rc;
463 #ifdef SQLITE_LOG_CACHE_SPILL
464 sqlite3_log(SQLITE_FULL,
465 "spill page %d making room for %d - cache used: %d/%d",
466 pPg->pgno, pgno,
467 sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache),
468 numberOfCachePages(pCache));
469 #endif
470 pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
471 rc = pCache->xStress(pCache->pStress, pPg);
472 pcacheDump(pCache);
473 if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
474 return rc;
478 *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
479 return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
483 ** This is a helper routine for sqlite3PcacheFetchFinish()
485 ** In the uncommon case where the page being fetched has not been
486 ** initialized, this routine is invoked to do the initialization.
487 ** This routine is broken out into a separate function since it
488 ** requires extra stack manipulation that can be avoided in the common
489 ** case.
491 static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
492 PCache *pCache, /* Obtain the page from this cache */
493 Pgno pgno, /* Page number obtained */
494 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
496 PgHdr *pPgHdr;
497 assert( pPage!=0 );
498 pPgHdr = (PgHdr*)pPage->pExtra;
499 assert( pPgHdr->pPage==0 );
500 memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
501 pPgHdr->pPage = pPage;
502 pPgHdr->pData = pPage->pBuf;
503 pPgHdr->pExtra = (void *)&pPgHdr[1];
504 memset(pPgHdr->pExtra, 0, 8);
505 pPgHdr->pCache = pCache;
506 pPgHdr->pgno = pgno;
507 pPgHdr->flags = PGHDR_CLEAN;
508 return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
512 ** This routine converts the sqlite3_pcache_page object returned by
513 ** sqlite3PcacheFetch() into an initialized PgHdr object. This routine
514 ** must be called after sqlite3PcacheFetch() in order to get a usable
515 ** result.
517 PgHdr *sqlite3PcacheFetchFinish(
518 PCache *pCache, /* Obtain the page from this cache */
519 Pgno pgno, /* Page number obtained */
520 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
522 PgHdr *pPgHdr;
524 assert( pPage!=0 );
525 pPgHdr = (PgHdr *)pPage->pExtra;
527 if( !pPgHdr->pPage ){
528 return pcacheFetchFinishWithInit(pCache, pgno, pPage);
530 pCache->nRefSum++;
531 pPgHdr->nRef++;
532 assert( sqlite3PcachePageSanity(pPgHdr) );
533 return pPgHdr;
537 ** Decrement the reference count on a page. If the page is clean and the
538 ** reference count drops to 0, then it is made eligible for recycling.
540 void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
541 assert( p->nRef>0 );
542 p->pCache->nRefSum--;
543 if( (--p->nRef)==0 ){
544 if( p->flags&PGHDR_CLEAN ){
545 pcacheUnpin(p);
546 }else{
547 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
548 assert( sqlite3PcachePageSanity(p) );
554 ** Increase the reference count of a supplied page by 1.
556 void sqlite3PcacheRef(PgHdr *p){
557 assert(p->nRef>0);
558 assert( sqlite3PcachePageSanity(p) );
559 p->nRef++;
560 p->pCache->nRefSum++;
564 ** Drop a page from the cache. There must be exactly one reference to the
565 ** page. This function deletes that reference, so after it returns the
566 ** page pointed to by p is invalid.
568 void sqlite3PcacheDrop(PgHdr *p){
569 assert( p->nRef==1 );
570 assert( sqlite3PcachePageSanity(p) );
571 if( p->flags&PGHDR_DIRTY ){
572 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
574 p->pCache->nRefSum--;
575 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
579 ** Make sure the page is marked as dirty. If it isn't dirty already,
580 ** make it so.
582 void sqlite3PcacheMakeDirty(PgHdr *p){
583 assert( p->nRef>0 );
584 assert( sqlite3PcachePageSanity(p) );
585 if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/
586 p->flags &= ~PGHDR_DONT_WRITE;
587 if( p->flags & PGHDR_CLEAN ){
588 p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
589 pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
590 assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
591 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
592 assert( sqlite3PcachePageSanity(p) );
594 assert( sqlite3PcachePageSanity(p) );
599 ** Make sure the page is marked as clean. If it isn't clean already,
600 ** make it so.
602 void sqlite3PcacheMakeClean(PgHdr *p){
603 assert( sqlite3PcachePageSanity(p) );
604 assert( (p->flags & PGHDR_DIRTY)!=0 );
605 assert( (p->flags & PGHDR_CLEAN)==0 );
606 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
607 p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
608 p->flags |= PGHDR_CLEAN;
609 pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
610 assert( sqlite3PcachePageSanity(p) );
611 if( p->nRef==0 ){
612 pcacheUnpin(p);
617 ** Make every page in the cache clean.
619 void sqlite3PcacheCleanAll(PCache *pCache){
620 PgHdr *p;
621 pcacheTrace(("%p.CLEAN-ALL\n",pCache));
622 while( (p = pCache->pDirty)!=0 ){
623 sqlite3PcacheMakeClean(p);
628 ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
630 void sqlite3PcacheClearWritable(PCache *pCache){
631 PgHdr *p;
632 pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
633 for(p=pCache->pDirty; p; p=p->pDirtyNext){
634 p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
636 pCache->pSynced = pCache->pDirtyTail;
640 ** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
642 void sqlite3PcacheClearSyncFlags(PCache *pCache){
643 PgHdr *p;
644 for(p=pCache->pDirty; p; p=p->pDirtyNext){
645 p->flags &= ~PGHDR_NEED_SYNC;
647 pCache->pSynced = pCache->pDirtyTail;
651 ** Change the page number of page p to newPgno.
653 void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
654 PCache *pCache = p->pCache;
655 sqlite3_pcache_page *pOther;
656 assert( p->nRef>0 );
657 assert( newPgno>0 );
658 assert( sqlite3PcachePageSanity(p) );
659 pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
660 pOther = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, newPgno, 0);
661 if( pOther ){
662 PgHdr *pXPage = (PgHdr*)pOther->pExtra;
663 assert( pXPage->nRef==0 );
664 pXPage->nRef++;
665 pCache->nRefSum++;
666 sqlite3PcacheDrop(pXPage);
668 sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
669 p->pgno = newPgno;
670 if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
671 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
672 assert( sqlite3PcachePageSanity(p) );
677 ** Drop every cache entry whose page number is greater than "pgno". The
678 ** caller must ensure that there are no outstanding references to any pages
679 ** other than page 1 with a page number greater than pgno.
681 ** If there is a reference to page 1 and the pgno parameter passed to this
682 ** function is 0, then the data area associated with page 1 is zeroed, but
683 ** the page object is not dropped.
685 void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
686 if( pCache->pCache ){
687 PgHdr *p;
688 PgHdr *pNext;
689 pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
690 for(p=pCache->pDirty; p; p=pNext){
691 pNext = p->pDirtyNext;
692 /* This routine never gets called with a positive pgno except right
693 ** after sqlite3PcacheCleanAll(). So if there are dirty pages,
694 ** it must be that pgno==0.
696 assert( p->pgno>0 );
697 if( p->pgno>pgno ){
698 assert( p->flags&PGHDR_DIRTY );
699 sqlite3PcacheMakeClean(p);
702 if( pgno==0 && pCache->nRefSum ){
703 sqlite3_pcache_page *pPage1;
704 pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
705 if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because
706 ** pCache->nRefSum>0 */
707 memset(pPage1->pBuf, 0, pCache->szPage);
708 pgno = 1;
711 sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
716 ** Close a cache.
718 void sqlite3PcacheClose(PCache *pCache){
719 assert( pCache->pCache!=0 );
720 pcacheTrace(("%p.CLOSE\n",pCache));
721 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
725 ** Discard the contents of the cache.
727 void sqlite3PcacheClear(PCache *pCache){
728 sqlite3PcacheTruncate(pCache, 0);
732 ** Merge two lists of pages connected by pDirty and in pgno order.
733 ** Do not bother fixing the pDirtyPrev pointers.
735 static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
736 PgHdr result, *pTail;
737 pTail = &result;
738 assert( pA!=0 && pB!=0 );
739 for(;;){
740 if( pA->pgno<pB->pgno ){
741 pTail->pDirty = pA;
742 pTail = pA;
743 pA = pA->pDirty;
744 if( pA==0 ){
745 pTail->pDirty = pB;
746 break;
748 }else{
749 pTail->pDirty = pB;
750 pTail = pB;
751 pB = pB->pDirty;
752 if( pB==0 ){
753 pTail->pDirty = pA;
754 break;
758 return result.pDirty;
762 ** Sort the list of pages in ascending order by pgno. Pages are
763 ** connected by pDirty pointers. The pDirtyPrev pointers are
764 ** corrupted by this sort.
766 ** Since there cannot be more than 2^31 distinct pages in a database,
767 ** there cannot be more than 31 buckets required by the merge sorter.
768 ** One extra bucket is added to catch overflow in case something
769 ** ever changes to make the previous sentence incorrect.
771 #define N_SORT_BUCKET 32
772 static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
773 PgHdr *a[N_SORT_BUCKET], *p;
774 int i;
775 memset(a, 0, sizeof(a));
776 while( pIn ){
777 p = pIn;
778 pIn = p->pDirty;
779 p->pDirty = 0;
780 for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
781 if( a[i]==0 ){
782 a[i] = p;
783 break;
784 }else{
785 p = pcacheMergeDirtyList(a[i], p);
786 a[i] = 0;
789 if( NEVER(i==N_SORT_BUCKET-1) ){
790 /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
791 ** the input list. But that is impossible.
793 a[i] = pcacheMergeDirtyList(a[i], p);
796 p = a[0];
797 for(i=1; i<N_SORT_BUCKET; i++){
798 if( a[i]==0 ) continue;
799 p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
801 return p;
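/* [Editor's illustrative sketch, not part of the original file.]  The same
** bucket-based bottom-up merge sort, restated on a minimal singly linked
** list type: when occupied, bucket a[i] holds a sorted run of 2^i nodes,
** so 31 regular buckets plus one overflow bucket cover any list of fewer
** than 2^31 nodes.  Kept inside #if 0 so it does not affect the build. */
#if 0
typedef struct ExNode ExNode;
struct ExNode { unsigned key; ExNode *pNext; };

static ExNode *exMerge(ExNode *pA, ExNode *pB){
  ExNode head, *pTail = &head;
  while( pA && pB ){
    if( pA->key<pB->key ){ pTail->pNext = pA; pTail = pA; pA = pA->pNext; }
    else                 { pTail->pNext = pB; pTail = pB; pB = pB->pNext; }
  }
  pTail->pNext = pA ? pA : pB;
  return head.pNext;
}

static ExNode *exSort(ExNode *pIn){
  ExNode *a[32], *p;
  int i;
  memset(a, 0, sizeof(a));
  while( pIn ){
    p = pIn;
    pIn = p->pNext;
    p->pNext = 0;
    for(i=0; i<31 && a[i]; i++){ p = exMerge(a[i], p); a[i] = 0; }
    a[i] = a[i] ? exMerge(a[i], p) : p;   /* a[31] catches any overflow */
  }
  for(p=0, i=0; i<32; i++){
    if( a[i] ) p = p ? exMerge(p, a[i]) : a[i];
  }
  return p;
}
#endif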
805 ** Return a list of all dirty pages in the cache, sorted by page number.
807 PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
808 PgHdr *p;
809 for(p=pCache->pDirty; p; p=p->pDirtyNext){
810 p->pDirty = p->pDirtyNext;
812 return pcacheSortDirtyList(pCache->pDirty);
816 ** Return the total number of references to all pages held by the cache.
818 ** This is not the total number of pages referenced, but the sum of the
819 ** reference count for all pages.
821 i64 sqlite3PcacheRefCount(PCache *pCache){
822 return pCache->nRefSum;
826 ** Return the number of references to the page supplied as an argument.
828 i64 sqlite3PcachePageRefcount(PgHdr *p){
829 return p->nRef;
833 ** Return the total number of pages in the cache.
835 int sqlite3PcachePagecount(PCache *pCache){
836 assert( pCache->pCache!=0 );
837 return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
840 #ifdef SQLITE_TEST
842 ** Get the suggested cache-size value.
844 int sqlite3PcacheGetCachesize(PCache *pCache){
845 return numberOfCachePages(pCache);
847 #endif
850 ** Set the suggested cache-size value.
852 void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
853 assert( pCache->pCache!=0 );
854 pCache->szCache = mxPage;
855 sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
856 numberOfCachePages(pCache));
860 ** Set the suggested cache-spill value. Make no changes if the
861 ** argument is zero. Return the effective cache-spill size, which will
862 ** be the larger of szSpill and szCache.
864 int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
865 int res;
866 assert( p->pCache!=0 );
867 if( mxPage ){
868 if( mxPage<0 ){
869 mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
871 p->szSpill = mxPage;
873 res = numberOfCachePages(p);
874 if( res<p->szSpill ) res = p->szSpill;
875 return res;
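/* [Editor's worked example, not in the original file.]  Using assumed
** sizes for illustration: "PRAGMA cache_spill=-512" with a 4096-byte page
** and 136 bytes of extra space gives (-1024 * -512)/(4096+136) =
** 524288/4232 = 123 pages for szSpill.  If the configured cache size is
** larger, say 483 pages, then the value returned above is 483, the larger
** of the two. */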
879 ** Free up as much memory as possible from the page cache.
881 void sqlite3PcacheShrink(PCache *pCache){
882 assert( pCache->pCache!=0 );
883 sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
887 ** Return the size of the header added by this middleware layer
888 ** in the page-cache hierarchy.
890 int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }
893 ** Return the number of dirty pages currently in the cache, as a percentage
894 ** of the configured cache size.
896 int sqlite3PCachePercentDirty(PCache *pCache){
897 PgHdr *pDirty;
898 int nDirty = 0;
899 int nCache = numberOfCachePages(pCache);
900 for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
901 return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
904 #ifdef SQLITE_DIRECT_OVERFLOW_READ
906 ** Return true if there are one or more dirty pages in the cache. Else false.
908 int sqlite3PCacheIsDirty(PCache *pCache){
909 return (pCache->pDirty!=0);
911 #endif
913 #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
915 ** For all dirty pages currently in the cache, invoke the specified
916 ** callback. This is only used if the SQLITE_CHECK_PAGES macro is
917 ** defined.
919 void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
920 PgHdr *pDirty;
921 for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
922 xIter(pDirty);
925 #endif