/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
static const char sccsid[] = "@(#)mp_sync.c	10.31 (Sleepycat) 12/11/98";
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif

#include "common_ext.h"
static int __bhcmp __P((const void *, const void *));
static int __memp_fsync __P((DB_MPOOLFILE *));
/*
 * memp_sync --
 *	Mpool sync function.
 */
	int ar_cnt, nalloc, next, maxpin, ret, wrote;
	if (dbenv->lg_info == NULL) {
		__db_err(dbenv, "memp_sync: requires logging");
		return (EINVAL);
	}
	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so only hold it while we create the list.  Get a
	 * good-size block of memory to hold buffer pointers so we don't run
	 * out.
	 */
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);
	/*
	 * If the application is asking about a previous call to memp_sync(),
	 * and we haven't found any buffers that the application holding the
	 * pin couldn't write, return yes or no based on the current count.
	 * Note, if the application is asking about an LSN *smaller* than one
	 * we've already handled or are currently handling, then we return a
	 * result based on the count for the larger LSN.
	 */
	if (!F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
		if (mp->lsn_cnt == 0) {
	/* Else, it's a new checkpoint. */
	F_CLR(mp, MP_LSN_RETRY);
	/*
	 * Save the LSN.  We know that it's a new LSN or larger than the one
	 * for which we were already doing a checkpoint.  (BTW, I don't expect
	 * to see multiple LSNs from the same or multiple processes, but You
	 * Just Never Know.  Responding as if they all called with the largest
	 * of the LSNs specified makes everything work.)
	 *
	 * We don't currently use the LSN we save.  We could potentially save
	 * the last-written LSN in each buffer header and use it to determine
	 * what buffers need to be written.  The problem with this is that it's
	 * sizeof(LSN) more bytes of buffer header.  We currently write all the
	 * dirty buffers instead.
	 *
	 * Walk the list of shared memory segments clearing the count of
	 * buffers waiting to be written.
	 */
	for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
		mfp->lsn_cnt = 0;
	/*
	 * Walk the list of buffers and mark all dirty buffers to be written
	 * and all pinned buffers to be potentially written (we can't know if
	 * we'll need to write them until the holding process returns them to
	 * the cache).  We do this in one pass while holding the region locked
	 * so that processes can't make new buffers dirty, causing us to never
	 * finish.  Since the application may have restarted the sync, clear
	 * any BH_WRITE flags that appear to be left over from previous calls.
	 *
	 * We don't want to pin down the entire buffer cache, otherwise we'll
	 * starve threads needing new pages.  Don't pin down more than 80% of
	 * the cache.
	 *
	 * Keep a count of the total number of buffers we need to write in
	 * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_cnt.
	 */
	maxpin = ((mp->stat.st_page_dirty + mp->stat.st_page_clean) * 8) / 10;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
		if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
			F_SET(bhp, BH_WRITE);
			mfp = R_ADDR(dbmp, bhp->mf_offset);
			/*
			 * If the buffer isn't in use, we should be able to
			 * write it immediately, so increment the reference
			 * count to lock it and its contents down, and then
			 * save a reference to it.
			 *
			 * If we've run out of space to store buffer
			 * references, we're screwed.  We don't want to
			 * realloc the array while holding a region lock, so
			 * we set the flag to force the checkpoint to be done
			 * again, from scratch, later.
			 *
			 * If we've pinned down too much of the cache, stop
			 * and set a flag to force the checkpoint to be tried
			 * again later.
			 */
			bharray[ar_cnt] = bhp;
			if (++ar_cnt >= nalloc || ar_cnt >= maxpin) {
				F_SET(mp, MP_LSN_RETRY);
				break;
			}
		if (F_ISSET(bhp, BH_WRITE))
			F_CLR(bhp, BH_WRITE);
	/* If there are no buffers we can write immediately, we're done. */
		ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
	/* Sort the buffers we're going to write. */
	qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer and assume that it will be
		 * written when it's returned to the cache.
		 */
		if (bharray[next]->ref > 1) {
			--bharray[next]->ref;
			continue;
		}
		/* Write the buffer. */
		mfp = R_ADDR(dbmp, bharray[next]->mf_offset);
		ret = __memp_bhwrite(dbmp, mfp, bharray[next], NULL, &wrote);
		/* Release the buffer. */
		--bharray[next]->ref;
		/* If there's an error, release the rest of the buffers. */
		if (ret != 0 || !wrote) {
			/*
			 * Any process syncing the shared memory buffer pool
			 * had better be able to write to any underlying file.
			 * Be understanding, but firm, on this point.
			 */
			__db_err(dbenv, "%s: unable to flush page: %lu",
			    __memp_fns(dbmp, mfp),
			    (u_long)bharray[next]->pgno);
			while (++next < ar_cnt)
				--bharray[next]->ref;
	ret = mp->lsn_cnt != 0 ||
	    F_ISSET(mp, MP_LSN_RETRY) ? DB_INCOMPLETE : 0;
	/*
	 * On error, clear:
	 *	MPOOL->lsn_cnt (the total sync count)
	 *	MPOOLFILE->lsn_cnt (the per-file sync count)
	 *	BH_WRITE flag (the scheduled for writing flag)
	 */
	for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
		mfp->lsn_cnt = 0;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
		F_CLR(bhp, BH_WRITE);
	__os_free(bharray, nalloc * sizeof(BH *));
	return (ret);
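
/*
 * Usage sketch: a checkpointing application typically calls memp_sync()
 * with the LSN of a checkpoint record it has already written to the log,
 * and retries while DB_INCOMPLETE comes back, i.e. while pinned buffers
 * prevented a complete flush.  This assumes the external interface
 * memp_sync(DB_MPOOL *, DB_LSN *) and a DB_MPOOL handle named dbmp:
 *
 *	DB_LSN ckp_lsn;		(the LSN of the checkpoint log record)
 *	int ret;
 *
 *	while ((ret = memp_sync(dbmp, &ckp_lsn)) == DB_INCOMPLETE) {
 *		(wait briefly for pinned buffers to be returned, then retry)
 *	}
 *	if (ret != 0)
 *		(report the error)
 */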
/*
 * memp_fsync --
 *	Mpool file sync function.
 */
	MP_PANIC_CHECK(dbmp);
	/*
	 * If this handle doesn't have a file descriptor that's open for
	 * writing, or if the file is a temporary, there's no reason to
	 * proceed further.
	 */
	if (F_ISSET(dbmfp, MP_READONLY))
		return (0);
	is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
	return (__memp_fsync(dbmfp));
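
/*
 * Usage sketch: memp_fsync() flushes the dirty pages of a single file in
 * the pool and, like memp_sync(), can return DB_INCOMPLETE when pinned or
 * locked buffers could not be written.  This assumes a DB_MPOOLFILE handle
 * named dbmfp:
 *
 *	int ret;
 *
 *	if ((ret = memp_fsync(dbmfp)) == DB_INCOMPLETE) {
 *		(some buffers were in use; retry later)
 *	} else if (ret != 0)
 *		(report the error)
 */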
/*
 * __mp_xxx_fd --
 *	Return a file descriptor for DB 1.85 compatibility locking.
 *
 * PUBLIC: int __mp_xxx_fd __P((DB_MPOOLFILE *, int *));
 */
int
__mp_xxx_fd(dbmfp, fdp)
	DB_MPOOLFILE *dbmfp;
	int *fdp;
	/*
	 * This is a truly spectacular layering violation, intended ONLY to
	 * support compatibility for the DB 1.85 DB->fd call.
	 *
	 * Sync the database file to disk, creating the file as necessary.
	 *
	 * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
	 * The MP_READONLY test isn't interesting because we will either
	 * already have a file descriptor (we opened the database file for
	 * reading) or we aren't readonly (we created the database, which
	 * requires write privileges).  The MP_TEMP test isn't interesting
	 * because we want to write to the backing file regardless, so that
	 * we get a file descriptor to return.
	 */
	ret = dbmfp->fd == -1 ? __memp_fsync(dbmfp) : 0;

	return ((*fdp = dbmfp->fd) == -1 ? ENOENT : ret);
/*
 * __memp_fsync --
 *	Mpool file internal sync function.
 */
	int ar_cnt, incomplete, nalloc, next, ret, wrote;
	mf_offset = R_OFFSET(dbmp, dbmfp->mfp);
	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so only hold it while we create the list.  Get a
	 * good-size block of memory to hold buffer pointers so we don't run
	 * out.
	 */
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);
	/*
	 * Walk the LRU list of buffer headers, and get a list of buffers to
	 * write for this MPOOLFILE.
	 */
	ar_cnt = incomplete = 0;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (!F_ISSET(bhp, BH_DIRTY) || bhp->mf_offset != mf_offset)
			continue;
		if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
			incomplete = 1;
			continue;
		}
		bharray[ar_cnt] = bhp;
		/*
		 * If we've run out of space to store buffer references, we're
		 * screwed, as we don't want to realloc the array while
		 * holding a region lock.  Set the incomplete flag -- the only
		 * way we can get here is if the file is active in the buffer
		 * cache, which is the same thing as finding pinned buffers.
		 */
		if (++ar_cnt >= nalloc) {
			incomplete = 1;
			break;
		}
	/* Sort the buffers we're going to write. */
	qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer.
		 */
		if (bharray[next]->ref > 1) {
			--bharray[next]->ref;
			continue;
		}
		/* Write the buffer. */
		ret = __memp_pgwrite(dbmfp, bharray[next], NULL, &wrote);
		/* Release the buffer. */
		--bharray[next]->ref;
		/* If there's an error, release the rest of the buffers. */
		if (ret != 0) {
			while (++next < ar_cnt)
				--bharray[next]->ref;
		/*
		 * If we didn't write the buffer for some reason, don't return
		 * success.
		 */
err:	UNLOCKREGION(dbmp);

	__os_free(bharray, nalloc * sizeof(BH *));
	/*
	 * Sync the underlying file as the last thing we do, so that the OS
	 * has maximal opportunity to flush buffers before we request it.
	 *
	 * Don't lock the region around the sync, fsync(2) has no atomicity
	 * issues.
	 */
	return (incomplete ? DB_INCOMPLETE : __os_fsync(dbmfp->fd));
/*
 * memp_trickle --
 *	Keep a specified percentage of the buffers clean.
 */
int
memp_trickle(dbmp, pct, nwrotep)
	DB_MPOOL *dbmp;
	int pct, *nwrotep;
	MP_PANIC_CHECK(dbmp);
	if (pct < 1 || pct > 100)
		return (EINVAL);
	/*
	 * If there are sufficient clean buffers, or no buffers at all, or no
	 * dirty buffers, we're done.
	 *
	 * Using st_page_clean and st_page_dirty is our only choice at the
	 * moment, but it's not as correct as we might like in the presence
	 * of pools with more than one buffer size, as a free 512-byte buffer
	 * isn't the same as a free 8K buffer.
	 */
loop:	total = mp->stat.st_page_clean + mp->stat.st_page_dirty;
	if (total == 0 || mp->stat.st_page_dirty == 0 ||
	    (mp->stat.st_page_clean * 100) / total >= (u_long)pct) {
	/* Loop until we write a buffer. */
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (bhp->ref != 0 ||
		    !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
			continue;
		mfp = R_ADDR(dbmp, bhp->mf_offset);

		/*
		 * We can't write to temporary files -- see the comment in
		 * mp_bh.c:__memp_bhwrite().
		 */
		if (F_ISSET(mfp, MP_TEMP))
			continue;
		if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
			goto err;
		/*
		 * Any process syncing the shared memory buffer pool had better
		 * be able to write to any underlying file.  Be understanding,
		 * but firm, on this point.
		 */
		__db_err(dbmp->dbenv, "%s: unable to flush page: %lu",
		    __memp_fns(dbmp, mfp), (u_long)pgno);
		++mp->stat.st_page_trickle;
	/* No more buffers to write. */
err:	UNLOCKREGION(dbmp);
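
/*
 * Usage sketch: memp_trickle() is intended to be called periodically, for
 * example from a background thread, so that threads allocating new pages
 * are more likely to find clean buffers available.  This assumes a
 * DB_MPOOL handle named dbmp:
 *
 *	int nwrote, ret;
 *
 *	(keep at least 20% of the buffers clean)
 *	if ((ret = memp_trickle(dbmp, 20, &nwrote)) != 0)
 *		(report the error)
 */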
static int
__bhcmp(p1, p2)
	const void *p1, *p2;
{
	BH *bhp1, *bhp2;

	bhp1 = *(BH * const *)p1;
	bhp2 = *(BH * const *)p2;
	/* Sort by file (shared memory pool offset). */
	if (bhp1->mf_offset < bhp2->mf_offset)
		return (-1);
	if (bhp1->mf_offset > bhp2->mf_offset)
		return (1);
	/*
	 * Defend against badly written quicksort code calling the comparison
	 * function with two identical pointers (e.g., WATCOM C++ (Power++)).
	 */
	if (bhp1->pgno < bhp2->pgno)
		return (-1);
	if (bhp1->pgno > bhp2->pgno)
		return (1);
	return (0);
}