2 // Simplified version of mempool.c, that is more oriented towards
3 // checking that the description of invalid addresses is correct.
#include <stdio.h>
#include <stdlib.h>
#include "tests/sys_mman.h"

#include "../memcheck.h"
13 #define SUPERBLOCK_SIZE 100000
14 #define REDZONE_SIZE 8
16 typedef struct _level_list
18 struct _level_list
*next
;
20 // Padding ensures the struct is the same size on 32-bit and 64-bit
22 char padding
[16 - 2*sizeof(char*)];
25 typedef struct _pool
{
30 // Padding ensures the struct is the same size on 32-bit and 64-bit
32 char padding
[24 - 3*sizeof(char*)];
35 pool
*make_pool( int use_mmap
)
40 p
= (pool
*)mmap(0, sizeof(pool
), PROT_READ
|PROT_WRITE
|PROT_EXEC
,
41 MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
42 p
->where
= p
->mem
= (char *)mmap(NULL
, SUPERBLOCK_SIZE
,
43 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
44 MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
46 p
= (pool
*)malloc(sizeof(pool
));
47 p
->where
= p
->mem
= (char *)malloc(SUPERBLOCK_SIZE
);
50 p
->size
= p
->left
= SUPERBLOCK_SIZE
;
52 (void) VALGRIND_MAKE_MEM_NOACCESS(p
->where
, SUPERBLOCK_SIZE
);
56 void push(pool
*p
, int use_mmap
)
61 l
= (level_list
*)mmap(0, sizeof(level_list
),
62 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
63 MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
65 l
= (level_list
*)malloc(sizeof(level_list
));
69 VALGRIND_CREATE_MEMPOOL(l
->where
, REDZONE_SIZE
, 0);
73 void pop(pool
*p
, int use_mmap
)
75 level_list
*l
= p
->levels
;
77 VALGRIND_DESTROY_MEMPOOL(l
->where
);
78 (void) VALGRIND_MAKE_MEM_NOACCESS(l
->where
, p
->where
-l
->where
);
81 munmap(l
, sizeof(level_list
));
86 void destroy_pool(pool
*p
, int use_mmap
)
88 level_list
*l
= p
->levels
;
94 munmap(p
->mem
, SUPERBLOCK_SIZE
);
95 munmap(p
, sizeof(pool
));
102 char *allocate(pool
*p
, int size
)
105 p
->left
-= size
+ (REDZONE_SIZE
*2);
106 where
= p
->where
+ REDZONE_SIZE
;
107 p
->where
+= size
+ (REDZONE_SIZE
*2);
108 VALGRIND_MEMPOOL_ALLOC(p
->levels
->where
, where
, size
);
112 //-------------------------------------------------------------------------
114 //-------------------------------------------------------------------------
121 // p1 is a malloc-backed pool
122 pool
*p1
= make_pool(0);
124 // p2 is a mmap-backed pool
125 pool
*p2
= make_pool(1);
130 x1
= allocate(p1
, 10);
131 x2
= allocate(p2
, 20);
134 "\n------ out of range reads in malloc-backed pool ------\n\n");
139 "\n------ out of range reads in mmap-backed pool ------\n\n");
140 res
+= x2
[-1]; // invalid
141 res
+= x2
[20]; // invalid
144 "\n------ read free in malloc-backed pool ------\n\n");
145 VALGRIND_MEMPOOL_FREE(p1
, x1
);
149 "\n------ read free in mmap-backed pool ------\n\n");
150 VALGRIND_MEMPOOL_FREE(p2
, x2
);
154 "\n------ double free in malloc-backed pool ------\n\n");
155 VALGRIND_MEMPOOL_FREE(p1
, x1
);
158 "\n------ double free in mmap-backed pool ------\n\n");
159 VALGRIND_MEMPOOL_FREE(p2
, x2
);
162 // test that redzone are still protected even if the user forgets
163 // to mark the superblock noaccess.
164 char superblock
[100];
166 VALGRIND_CREATE_MEMPOOL(superblock
, REDZONE_SIZE
, 0);
167 // User should mark the superblock no access to benefit
168 // from full Valgrind memcheck protection.
169 // VALGRIND_MEMPOOL_ALLOC will however still ensure the
170 // redzones are protected.
171 VALGRIND_MEMPOOL_ALLOC(superblock
, superblock
+30, 10);
173 res
+= superblock
[30]; // valid
174 res
+= superblock
[39]; // valid
177 "\n------ 2 invalid access in 'no no-access superblock' ---\n\n");
178 res
+= superblock
[29]; // invalid
179 res
+= superblock
[40]; // invalid
181 VALGRIND_DESTROY_MEMPOOL(superblock
);
183 // claim res is used, so gcc can't nuke this all
184 __asm__
__volatile__("" : : "r"(res
));
187 "\n------ done ------\n\n");