1 diff -U 5 -r db-5.3.21.old/src/dbinc_auto/int_def.in db-5.3.21/src/dbinc_auto/int_def.in
2 --- db-5.3.21.old/src/dbinc_auto/int_def.in 2012-05-12 01:57:53.000000000 +0800
3 +++ db-5.3.21/src/dbinc_auto/int_def.in 2016-10-25 22:40:58.000000000 +0800
4 @@ -1371,10 +1371,11 @@
5 #define __memp_failchk __memp_failchk@DB_VERSION_UNIQUE_NAME@
6 #define __memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
7 #define __memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
8 #define __memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
9 #define __memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
10 +#define __memp_bh_clear_dirty __memp_bh_clear_dirty@DB_VERSION_UNIQUE_NAME@
11 #define __memp_fget_pp __memp_fget_pp@DB_VERSION_UNIQUE_NAME@
12 #define __memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
13 #define __memp_fcreate_pp __memp_fcreate_pp@DB_VERSION_UNIQUE_NAME@
14 #define __memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
15 #define __memp_set_clear_len __memp_set_clear_len@DB_VERSION_UNIQUE_NAME@
16 @@ -1395,10 +1396,11 @@
17 #define __memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@
18 #define __memp_fclose_pp __memp_fclose_pp@DB_VERSION_UNIQUE_NAME@
19 #define __memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@
20 #define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
21 #define __memp_inmemlist __memp_inmemlist@DB_VERSION_UNIQUE_NAME@
22 +#define __memp_mf_mark_dead __memp_mf_mark_dead@DB_VERSION_UNIQUE_NAME@
23 #define __memp_fput_pp __memp_fput_pp@DB_VERSION_UNIQUE_NAME@
24 #define __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
25 #define __memp_unpin_buffers __memp_unpin_buffers@DB_VERSION_UNIQUE_NAME@
26 #define __memp_dirty __memp_dirty@DB_VERSION_UNIQUE_NAME@
27 #define __memp_shared __memp_shared@DB_VERSION_UNIQUE_NAME@
28 @@ -1453,10 +1455,11 @@
29 #define __memp_fsync_pp __memp_fsync_pp@DB_VERSION_UNIQUE_NAME@
30 #define __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
31 #define __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
32 #define __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
33 #define __memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@
34 +#define __memp_purge_dead_files __memp_purge_dead_files@DB_VERSION_UNIQUE_NAME@
35 #define __memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@
36 #define __mutex_alloc __mutex_alloc@DB_VERSION_UNIQUE_NAME@
37 #define __mutex_alloc_int __mutex_alloc_int@DB_VERSION_UNIQUE_NAME@
38 #define __mutex_free __mutex_free@DB_VERSION_UNIQUE_NAME@
39 #define __mutex_free_int __mutex_free_int@DB_VERSION_UNIQUE_NAME@
40 diff -U 5 -r db-5.3.21.old/src/dbinc_auto/mp_ext.h db-5.3.21/src/dbinc_auto/mp_ext.h
41 --- db-5.3.21.old/src/dbinc_auto/mp_ext.h 2012-05-12 01:57:53.000000000 +0800
42 +++ db-5.3.21/src/dbinc_auto/mp_ext.h 2016-10-25 22:40:58.000000000 +0800
44 int __memp_failchk __P((ENV *));
45 int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
46 int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
47 int __memp_pg __P((DB_MPOOLFILE *, db_pgno_t, void *, int));
48 int __memp_bhfree __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, DB_MPOOL_HASH *, BH *, u_int32_t));
49 +void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
50 int __memp_fget_pp __P((DB_MPOOLFILE *, db_pgno_t *, DB_TXN *, u_int32_t, void *));
51 int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, DB_THREAD_INFO *, DB_TXN *, u_int32_t, void *));
52 int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
53 int __memp_fcreate __P((ENV *, DB_MPOOLFILE **));
54 int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
56 int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, const char **, u_int32_t, int, size_t));
57 int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t));
58 int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
59 int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *, int));
60 int __memp_inmemlist __P((ENV *, char ***, int *));
61 +void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
62 int __memp_fput_pp __P((DB_MPOOLFILE *, void *, DB_CACHE_PRIORITY, u_int32_t));
63 int __memp_fput __P((DB_MPOOLFILE *, DB_THREAD_INFO *, void *, DB_CACHE_PRIORITY));
64 int __memp_unpin_buffers __P((ENV *, DB_THREAD_INFO *));
65 int __memp_dirty __P((DB_MPOOLFILE *, void *, DB_THREAD_INFO *, DB_TXN *, DB_CACHE_PRIORITY, u_int32_t));
66 int __memp_shared __P((DB_MPOOLFILE *, void *));
68 int __memp_fsync_pp __P((DB_MPOOLFILE *));
69 int __memp_fsync __P((DB_MPOOLFILE *));
70 int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
71 int __memp_sync_int __P((ENV *, DB_MPOOLFILE *, u_int32_t, u_int32_t, u_int32_t *, int *));
72 int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
73 +int __memp_purge_dead_files __P((ENV *));
74 int __memp_trickle_pp __P((DB_ENV *, int, int *));
76 #if defined(__cplusplus)
79 diff -U 5 -r db-5.3.21.old/src/mp/mp_bh.c db-5.3.21/src/mp/mp_bh.c
80 --- db-5.3.21.old/src/mp/mp_bh.c 2012-05-12 01:57:53.000000000 +0800
81 +++ db-5.3.21/src/mp/mp_bh.c 2016-10-25 17:09:35.000000000 +0800
85 if (F_ISSET(bhp, BH_DIRTY | BH_TRASH)) {
86 MUTEX_LOCK(env, hp->mtx_hash);
87 DB_ASSERT(env, !SH_CHAIN_HASNEXT(bhp, vc));
88 - if (ret == 0 && F_ISSET(bhp, BH_DIRTY)) {
89 - F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
90 - DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
91 - atomic_dec(env, &hp->hash_page_dirty);
94 + __memp_bh_clear_dirty(env, hp, bhp);
96 /* put the page back if necessary. */
97 if ((ret != 0 || BH_REFCOUNT(bhp) > 1) &&
98 F_ISSET(bhp, BH_TRASH)) {
99 ret = __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);
102 MUTEX_UNLOCK(env, mfp->mutex);
108 + * __memp_bh_clear_dirty --
109 + * Clear the dirty flag of a buffer. Calls on the same buffer must be
110 + * serialized to get the accounting correct. This can be achieved by
111 + * acquiring an exclusive lock on the buffer, a shared lock on the
112 + * buffer plus an exclusive lock on the hash bucket, or some other
113 + * mechanism that guarantees single-thread access to the entire region
114 + * (e.g. during __memp_region_bhfree()).
116 + * PUBLIC: void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
119 +__memp_bh_clear_dirty(env, hp, bhp)
124 + COMPQUIET(env, env);
125 + if (F_ISSET(bhp, BH_DIRTY)) {
126 + F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
127 + DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
128 + (void)atomic_dec(env, &hp->hash_page_dirty);
132 diff -U 5 -r db-5.3.21.old/src/mp/mp_fget.c db-5.3.21/src/mp/mp_fget.c
133 --- db-5.3.21.old/src/mp/mp_fget.c 2012-05-12 01:57:53.000000000 +0800
134 +++ db-5.3.21/src/mp/mp_fget.c 2016-10-25 17:11:08.000000000 +0800
135 @@ -437,16 +437,11 @@
136 * complain and get out.
138 if (flags == DB_MPOOL_FREE) {
139 freebuf: MUTEX_LOCK(env, hp->mtx_hash);
141 - if (F_ISSET(bhp, BH_DIRTY)) {
142 - F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
144 - atomic_read(&hp->hash_page_dirty) > 0);
145 - atomic_dec(env, &hp->hash_page_dirty);
147 + __memp_bh_clear_dirty(env, hp, bhp);
150 * If the buffer we found is already freed, we're done.
151 * If the ref count is not 1 then someone may be
152 * peeking at the buffer. We cannot free it until they
153 diff -U 5 -r db-5.3.21.old/src/mp/mp_fopen.c db-5.3.21/src/mp/mp_fopen.c
154 --- db-5.3.21.old/src/mp/mp_fopen.c 2012-05-12 01:57:53.000000000 +0800
155 +++ db-5.3.21/src/mp/mp_fopen.c 2016-10-25 22:31:05.000000000 +0800
157 #include "dbinc/log.h"
158 #include "dbinc/mp.h"
159 #include "dbinc/db_page.h"
160 #include "dbinc/hash.h"
162 +static int __memp_count_dead_mutex __P((DB_MPOOL *, u_int32_t *));
163 static int __memp_mpf_alloc __P((DB_MPOOL *,
164 DB_MPOOLFILE *, const char *, u_int32_t, u_int32_t, MPOOLFILE **));
165 static int __memp_mpf_find __P((ENV *,
166 DB_MPOOLFILE *, DB_MPOOL_HASH *, const char *, u_int32_t, MPOOLFILE **));
168 @@ -709,11 +710,15 @@
169 * We should be able to set mfp to NULL and break out of the
170 * loop, but I like the idea of checking all the entries.
172 if (LF_ISSET(DB_TRUNCATE)) {
173 MUTEX_LOCK(env, mfp->mutex);
176 + * We cannot purge dead files here, because the caller
177 + * is holding the mutex of the hash bucket of mfp.
179 + __memp_mf_mark_dead(dbmp, mfp, NULL);
180 MUTEX_UNLOCK(env, mfp->mutex);
185 @@ -907,14 +912,15 @@
191 - int deleted, ret, t_ret;
192 + int deleted, purge_dead, ret, t_ret;
195 dbmp = env->mp_handle;
200 * Remove the DB_MPOOLFILE from the process' list.
202 @@ -1004,11 +1010,11 @@
204 DB_ASSERT(env, mfp->neutral_cnt < mfp->mpf_cnt);
205 if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
206 if (LF_ISSET(DB_MPOOL_DISCARD) ||
207 F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close) {
209 + __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
211 if (mfp->unlink_on_close) {
212 if ((t_ret = __db_appname(dbmp->env, DB_APP_DATA,
213 R_ADDR(dbmp->reginfo, mfp->path_off), NULL,
214 &rpath)) != 0 && ret == 0)
215 @@ -1037,10 +1043,12 @@
219 if (!deleted && !LF_ISSET(DB_MPOOL_NOLOCK))
220 MUTEX_UNLOCK(env, mfp->mutex);
222 + (void)__memp_purge_dead_files(env);
224 done: /* Discard the DB_MPOOLFILE structure. */
225 if (dbmfp->pgcookie != NULL) {
226 __os_free(env, dbmfp->pgcookie->data);
227 __os_free(env, dbmfp->pgcookie);
228 @@ -1091,11 +1099,11 @@
230 * We have to release the MPOOLFILE mutex before acquiring the region
231 * mutex so we don't deadlock. Make sure nobody ever looks at this
235 + __memp_mf_mark_dead(dbmp, mfp, NULL);
237 /* Discard the mutex we're holding and return it too the pool. */
238 MUTEX_UNLOCK(env, mfp->mutex);
239 if ((t_ret = __mutex_free(env, &mfp->mutex)) != 0 && ret == 0)
241 @@ -1216,5 +1224,106 @@
242 /* Make sure we don't return any garbage. */
249 + * __memp_mf_mark_dead --
250 + * Mark an MPOOLFILE as dead because its contents are no longer necessary.
251 + * This happens when removing, truncating, or closing an unnamed in-memory
252 + * database. Return, in the purgep parameter, whether the caller should
253 + * call __memp_purge_dead_files() after the lock on mfp is released. The
254 + * caller must hold an exclusive lock on the mfp handle.
256 + * PUBLIC: void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
259 +__memp_mf_mark_dead(dbmp, mfp, purgep)
265 +#ifdef HAVE_MUTEX_SUPPORT
267 + DB_MUTEXREGION *mtxregion;
268 + u_int32_t mutex_max, mutex_inuse, dead_mutex;
271 + if (purgep != NULL)
276 +#ifdef HAVE_MUTEX_SUPPORT
277 + MUTEX_REQUIRED(env, mfp->mutex);
279 + if (MUTEX_ON(env) && mfp->deadfile == 0) {
280 + infop = &env->mutex_handle->reginfo;
281 + mtxregion = infop->primary;
283 + mutex_inuse = mtxregion->stat.st_mutex_inuse;
284 + if ((mutex_max = env->dbenv->mutex_max) == 0)
285 + mutex_max = infop->rp->max / mtxregion->mutex_size;
288 + * Purging dead pages requires a full scan of the entire cache
289 + * buffer, so it is a slow operation. We only want to do it
290 + * when it is necessary and provides enough benefits. Below is
291 + * a simple heuristic that determines when to purge all dead
294 + if (purgep != NULL && mutex_inuse > mutex_max - 200) {
296 + * If the mutex region is almost full and there are
297 + * many mutexes held by dead files, purge dead files.
299 + (void)__memp_count_dead_mutex(dbmp, &dead_mutex);
300 + dead_mutex += mfp->block_cnt + 1;
302 + if (dead_mutex > mutex_inuse / 20)
312 + * __memp_count_dead_mutex --
313 + * Estimate the number of mutexes held by dead files.
316 +__memp_count_dead_mutex(dbmp, dead_mutex)
318 + u_int32_t *dead_mutex;
324 + u_int32_t mutex_per_file;
329 + mutex_per_file = 1;
330 +#ifndef HAVE_ATOMICFILEREAD
331 + mutex_per_file = 2;
333 + mp = dbmp->reginfo[0].primary;
334 + hp = R_ADDR(dbmp->reginfo, mp->ftab);
335 + for (i = 0; i < MPOOL_FILE_BUCKETS; i++, hp++) {
336 + busy = MUTEX_TRYLOCK(env, hp->mtx_hash);
339 + SH_TAILQ_FOREACH(mfp, &hp->hash_bucket, q, __mpoolfile) {
341 + *dead_mutex += mfp->block_cnt + mutex_per_file;
343 + MUTEX_UNLOCK(env, hp->mtx_hash);
348 diff -U 5 -r db-5.3.21.old/src/mp/mp_method.c db-5.3.21/src/mp/mp_method.c
349 --- db-5.3.21.old/src/mp/mp_method.c 2012-05-12 01:57:53.000000000 +0800
350 +++ db-5.3.21/src/mp/mp_method.c 2016-10-25 17:22:23.000000000 +0800
351 @@ -638,11 +638,11 @@
352 DB_MPOOL_HASH *hp, *nhp;
358 + int locked, purge_dead, ret;
363 #define op_is_remove (newname == NULL)
364 @@ -655,10 +655,11 @@
375 dbmp = env->mp_handle;
376 @@ -747,11 +748,11 @@
377 * they do not get reclaimed as long as they exist. Since we
378 * are now deleting the database, we need to dec that count.
380 if (mfp->no_backing_file)
383 + __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
384 MUTEX_UNLOCK(env, mfp->mutex);
387 * Else, it's a rename. We've allocated memory for the new
388 * name. Swap it with the old one. If it's in memory we
389 @@ -806,10 +807,16 @@
391 MUTEX_UNLOCK(env, hp->mtx_hash);
392 if (nhp != NULL && nhp != hp)
393 MUTEX_UNLOCK(env, nhp->mtx_hash);
396 + * __memp_purge_dead_files() must be called when the hash bucket is
400 + (void)__memp_purge_dead_files(env);
405 * __memp_ftruncate __
406 diff -U 5 -r db-5.3.21.old/src/mp/mp_sync.c db-5.3.21/src/mp/mp_sync.c
407 --- db-5.3.21.old/src/mp/mp_sync.c 2012-05-12 01:57:53.000000000 +0800
408 +++ db-5.3.21/src/mp/mp_sync.c 2016-10-25 17:26:58.000000000 +0800
410 static int __bhcmp __P((const void *, const void *));
411 static int __memp_close_flush_files __P((ENV *, int));
412 static int __memp_sync_files __P((ENV *));
413 static int __memp_sync_file __P((ENV *,
414 MPOOLFILE *, void *, u_int32_t *, u_int32_t));
415 +static inline void __update_err_ret(int, int*);
418 * __memp_walk_files --
419 * PUBLIC: int __memp_walk_files __P((ENV *, MPOOL *,
420 * PUBLIC: int (*) __P((ENV *, MPOOLFILE *, void *,
421 @@ -961,5 +962,125 @@
423 if (bhp1->track_pgno > bhp2->track_pgno)
429 + * __memp_purge_dead_files --
430 + * Remove all dead files and their buffers from the mpool. The caller
431 + * cannot hold any lock on the dead MPOOLFILE handles, their buffers
432 + * or their hash buckets.
434 + * PUBLIC: int __memp_purge_dead_files __P((ENV *));
437 +__memp_purge_dead_files(env)
442 + DB_MPOOL_HASH *hp, *hp_end;
447 + int ret, t_ret, h_lock;
449 + if (!MPOOL_ON(env))
452 + dbmp = env->mp_handle;
453 + mp = dbmp->reginfo[0].primary;
454 + ret = t_ret = h_lock = 0;
457 + * Walk each cache's list of buffers and free all buffers whose
458 + * MPOOLFILE is marked as dead.
460 + for (i_cache = 0; i_cache < mp->nreg; i_cache++) {
461 + infop = &dbmp->reginfo[i_cache];
462 + c_mp = infop->primary;
464 + hp = R_ADDR(infop, c_mp->htab);
465 + hp_end = &hp[c_mp->htab_buckets];
466 + for (; hp < hp_end; hp++) {
467 + /* Skip empty buckets. */
468 + if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
472 + * Search for a dead buffer. Other places that call
473 + * __memp_bhfree() acquire the buffer lock before the
474 + * hash bucket lock. Even though we acquire the two
475 + * locks in reverse order, we cannot deadlock here
476 + * because we don't block waiting for the locks.
478 + t_ret = MUTEX_TRYLOCK(env, hp->mtx_hash);
480 + __update_err_ret(t_ret, &ret);
484 + SH_TAILQ_FOREACH(bhp, &hp->hash_bucket, hq, __bh) {
485 + /* Skip buffers that are being used. */
486 + if (BH_REFCOUNT(bhp) > 0)
489 + mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
490 + if (!mfp->deadfile)
493 + /* Found a dead buffer. Prepare to free it. */
494 + t_ret = MUTEX_TRYLOCK(env, bhp->mtx_buf);
496 + __update_err_ret(t_ret, &ret);
500 + DB_ASSERT(env, (!F_ISSET(bhp, BH_EXCLUSIVE) &&
501 + BH_REFCOUNT(bhp) == 0));
502 + F_SET(bhp, BH_EXCLUSIVE);
503 + (void)atomic_inc(env, &bhp->ref);
505 + __memp_bh_clear_dirty(env, hp, bhp);
508 + * Free the buffer. The buffer and hash bucket
509 + * are unlocked by __memp_bhfree.
511 + if ((t_ret = __memp_bhfree(dbmp, infop, mfp,
512 + hp, bhp, BH_FREE_FREEMEM)) == 0)
514 + * Decrement hp, so the next turn will
515 + * search the same bucket again.
519 + __update_err_ret(t_ret, &ret);
522 + * The hash bucket is unlocked, we need to
523 + * start over again.
530 + MUTEX_UNLOCK(env, hp->mtx_hash);
540 +__update_err_ret(t_ret, retp)
544 + if (t_ret != 0 && t_ret != DB_LOCK_NOTGRANTED && *retp == 0)
547 diff -U 5 -r db-5.3.21.old/src/mp/mp_trickle.c db-5.3.21/src/mp/mp_trickle.c
548 --- db-5.3.21.old/src/mp/mp_trickle.c 2012-05-12 01:57:53.000000000 +0800
549 +++ db-5.3.21/src/mp/mp_trickle.c 2016-10-25 17:27:57.000000000 +0800
551 "DB_ENV->memp_trickle: %d: percent must be between 1 and 100",
556 + /* First we purge all dead files and their buffers. */
557 + if ((ret = __memp_purge_dead_files(env)) != 0)
561 * Loop through the caches counting total/dirty buffers.
564 * Using hash_page_dirty is our only choice at the moment, but it's not
565 diff -U 5 -r db-5.3.21.old/src/mutex/mut_region.c db-5.3.21/src/mutex/mut_region.c
566 --- db-5.3.21.old/src/mutex/mut_region.c 2012-05-12 01:57:54.000000000 +0800
567 +++ db-5.3.21/src/mutex/mut_region.c 2016-10-25 17:34:22.000000000 +0800
569 #include "dbinc/txn.h"
571 static db_size_t __mutex_align_size __P((ENV *));
572 static int __mutex_region_init __P((ENV *, DB_MUTEXMGR *));
573 static size_t __mutex_region_size __P((ENV *));
574 -static size_t __mutex_region_max __P((ENV *));
575 +static size_t __mutex_region_max __P((ENV *, u_int32_t));
579 * Open a mutex region.
585 DB_MUTEXREGION *mtxregion;
587 - u_int32_t cpu_count;
588 + u_int32_t cpu_count, mutex_needed;
590 #ifndef HAVE_ATOMIC_SUPPORT
595 cpu_count : cpu_count * MUTEX_SPINS_PER_PROCESSOR)) != 0)
600 - * If the user didn't set an absolute value on the number of mutexes
601 - * we'll need, figure it out. We're conservative in our allocation,
602 - * we need mutexes for DB handles, group-commit queues and other things
603 - * applications allocate at run-time. The application may have kicked
604 - * up our count to allocate its own mutexes, add that in.
605 + * Figure out the number of mutexes we'll need. We're conservative in
606 + * our allocation, we need mutexes for DB handles, group-commit queues
607 + * and other things applications allocate at run-time. The application
608 + * may have kicked up our count to allocate its own mutexes, add that
612 + __lock_region_mutex_count(env) +
613 + __log_region_mutex_count(env) +
614 + __memp_region_mutex_count(env) +
615 + __txn_region_mutex_count(env);
616 if (dbenv->mutex_cnt == 0 &&
617 F_ISSET(env, ENV_PRIVATE | ENV_THREAD) != ENV_PRIVATE)
619 - __lock_region_mutex_count(env) +
620 - __log_region_mutex_count(env) +
621 - __memp_region_mutex_count(env) +
622 - __txn_region_mutex_count(env);
623 + dbenv->mutex_cnt = mutex_needed;
625 if (dbenv->mutex_max != 0 && dbenv->mutex_cnt > dbenv->mutex_max)
626 dbenv->mutex_cnt = dbenv->mutex_max;
628 /* Create/initialize the mutex manager structure. */
630 mtxmgr->reginfo.id = INVALID_REGION_ID;
631 mtxmgr->reginfo.flags = REGION_JOIN_OK;
632 size = __mutex_region_size(env);
634 F_SET(&mtxmgr->reginfo, REGION_CREATE_OK);
635 - if ((ret = __env_region_attach(env,
636 - &mtxmgr->reginfo, size, size + __mutex_region_max(env))) != 0)
637 + if ((ret = __env_region_attach(env, &mtxmgr->reginfo,
638 + size, size + __mutex_region_max(env, mutex_needed))) != 0)
641 /* If we created the region, initialize it. */
642 if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE))
643 if ((ret = __mutex_region_init(env, mtxmgr)) != 0)
644 @@ -350,44 +351,62 @@
648 s = sizeof(DB_MUTEXMGR) + 1024;
650 - /* We discard one mutex for the OOB slot. */
652 + * We discard one mutex for the OOB slot. Make sure mutex_cnt doesn't
655 s += __env_alloc_size(
656 - (dbenv->mutex_cnt + 1) *__mutex_align_size(env));
657 + (dbenv->mutex_cnt + (dbenv->mutex_cnt == UINT32_MAX ? 0 : 1)) *
658 + __mutex_align_size(env));
664 * __mutex_region_max --
665 * Return the amount of space needed to reach the maximum size.
668 -__mutex_region_max(env)
669 +__mutex_region_max(env, mutex_needed)
671 + u_int32_t mutex_needed;
675 + u_int32_t max, mutex_cnt;
678 + mutex_cnt = dbenv->mutex_cnt;
680 - if ((max = dbenv->mutex_max) == 0) {
682 + * We want to limit the region size to accommodate at most UINT32_MAX
683 + * mutexes. If mutex_cnt is UINT32_MAX, no more space is allowed.
685 + if ((max = dbenv->mutex_max) == 0 && mutex_cnt != UINT32_MAX)
686 if (F_ISSET(env, ENV_PRIVATE | ENV_THREAD) == ENV_PRIVATE)
687 - max = dbenv->mutex_inc + 1;
689 + if (dbenv->mutex_inc + 1 < UINT32_MAX - mutex_cnt)
690 + max = dbenv->mutex_inc + 1 + mutex_cnt;
694 max = __lock_region_mutex_max(env) +
695 __txn_region_mutex_max(env) +
696 __log_region_mutex_max(env) +
697 dbenv->mutex_inc + 100;
698 - } else if (max <= dbenv->mutex_cnt)
699 + if (max < UINT32_MAX - mutex_needed)
700 + max += mutex_needed;
705 + if (max <= mutex_cnt)
708 - max -= dbenv->mutex_cnt;
710 - return ( __env_alloc_size(max * __mutex_align_size(env)));
711 + return (__env_alloc_size(
712 + (max - mutex_cnt) * __mutex_align_size(env)));
715 #ifdef HAVE_MUTEX_SYSTEM_RESOURCES
717 * __mutex_resource_return