/*
 * Test that VMA updates do not race.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Map a contiguous chunk of RWX memory. Split it into 8 equally sized
 * regions, each of which is guaranteed to have a certain combination of
 * protection bits set.
 *
 * Reader, writer and executor threads perform the respective operations on
 * pages, which are guaranteed to have the respective protection bit set.
 * Two mutator threads change the non-fixed protection bits randomly.
 */
#include <assert.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/mman.h>
#include <unistd.h>

#include "nop_func.h" /* nop_func[]: machine code for a function that just returns */
#define PAGE_IDX_BITS 10
#define PAGE_COUNT (1 << PAGE_IDX_BITS)
#define PAGE_IDX_MASK (PAGE_COUNT - 1)
#define REGION_IDX_BITS 3
#define PAGE_IDX_R_MASK (1 << 7)
#define PAGE_IDX_W_MASK (1 << 8)
#define PAGE_IDX_X_MASK (1 << 9)
#define REGION_MASK (PAGE_IDX_R_MASK | PAGE_IDX_W_MASK | PAGE_IDX_X_MASK)
#define PAGES_PER_REGION (1 << (PAGE_IDX_BITS - REGION_IDX_BITS))
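
/*
 * A page's index encodes its region in bits 7-9: bit 7 set means the page
 * lives in a region that is always readable, bit 8 always writable, bit 9
 * always executable. For example, index 0x380 (bits 7, 8 and 9 set) is in
 * the guaranteed-RWX region, while index 0x080 is only guaranteed readable.
 * With 10 index bits and 3 region bits, each of the 8 regions covers
 * PAGES_PER_REGION = 128 consecutive pages.
 */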
/* State shared between all threads. */
struct context {
    int dev_null_fd;            /* sink for the indirect-read write()s */
    char *ptr;                  /* base of the PAGE_COUNT-page mapping */
    int pagesize;
    volatile int mutator_count; /* workers run while any mutator is alive */
};
static void *thread_read(void *arg)
{
    struct context *ctx = arg;
    ssize_t sret;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        char *p;

        /* Pick a page from a region that is guaranteed to be readable. */
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_R_MASK;
        p = &ctx->ptr[j * ctx->pagesize];

        /* Read directly. */
        ret = memcmp(p, nop_func, sizeof(nop_func));
        if (ret != 0) {
            fprintf(stderr, "fail direct read %p\n", p);
            abort();
        }

        /* Read indirectly. */
        sret = write(ctx->dev_null_fd, p, 1);
        if (sret != 1) {
            if (sret < 0) {
                fprintf(stderr, "fail indirect read %p (%m)\n", p);
            } else {
                fprintf(stderr, "fail indirect read %p (%zd)\n", p, sret);
            }
            abort();
        }
    }

    return NULL;
}
static void *thread_write(void *arg)
{
    struct context *ctx = arg;
    struct timespec *ts;
    size_t i, j;
    int ret;

    for (i = 0; ctx->mutator_count; i++) {
        /* Pick a page from a region that is guaranteed to be writable. */
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_W_MASK;

        /* Write directly. */
        memcpy(&ctx->ptr[j * ctx->pagesize], nop_func, sizeof(nop_func));

        /* Write using a syscall, into the tail end of the same page. */
        ts = (struct timespec *)(&ctx->ptr[(j + 1) * ctx->pagesize] -
                                 sizeof(struct timespec));
        ret = clock_gettime(CLOCK_REALTIME, ts);
        if (ret != 0) {
            fprintf(stderr, "fail indirect write %p (%m)\n", ts);
            abort();
        }
    }

    return NULL;
}
static void *thread_execute(void *arg)
{
    struct context *ctx = arg;
    size_t i, j;

    for (i = 0; ctx->mutator_count; i++) {
        /* Pick a page from a region that is guaranteed to be executable. */
        j = (i & PAGE_IDX_MASK) | PAGE_IDX_X_MASK;
        /* Every page starts with a copy of nop_func, so just call it. */
        ((void (*)(void))&ctx->ptr[j * ctx->pagesize])();
    }

    return NULL;
}
static void *thread_mutate(void *arg)
{
    size_t i, start_idx, end_idx, page_idx, tmp;
    struct context *ctx = arg;
    unsigned int seed;
    int prot, ret;

    seed = (unsigned int)time(NULL);
    for (i = 0; i < 10000; i++) {
        /* Pick a random range of pages. */
        start_idx = rand_r(&seed) & PAGE_IDX_MASK;
        end_idx = rand_r(&seed) & PAGE_IDX_MASK;
        if (start_idx > end_idx) {
            tmp = start_idx;
            start_idx = end_idx;
            end_idx = tmp;
        }
        /*
         * Pick random protection bits, then add back the bits that each
         * covered region guarantees, so the other threads' accesses in
         * those regions stay valid.
         */
        prot = rand_r(&seed) & (PROT_READ | PROT_WRITE | PROT_EXEC);
        for (page_idx = start_idx & REGION_MASK; page_idx <= end_idx;
             page_idx += PAGES_PER_REGION) {
            if (page_idx & PAGE_IDX_R_MASK) {
                prot |= PROT_READ;
            }
            if (page_idx & PAGE_IDX_W_MASK) {
                /* FIXME: qemu syscalls check for both read+write. */
                prot |= PROT_WRITE | PROT_READ;
            }
            if (page_idx & PAGE_IDX_X_MASK) {
                prot |= PROT_EXEC;
            }
        }
        ret = mprotect(&ctx->ptr[start_idx * ctx->pagesize],
                       (end_idx - start_idx + 1) * ctx->pagesize, prot);
        assert(ret == 0);
    }

    __atomic_fetch_sub(&ctx->mutator_count, 1, __ATOMIC_SEQ_CST);

    return NULL;
}
int main(void)
{
    struct context ctx;
    pthread_t threads[5];
    size_t i;
    int ret;

    /* Without a template, nothing to test. */
    if (sizeof(nop_func) == 0) {
        return EXIT_SUCCESS;
    }

    /* Initialize memory chunk. */
    ctx.pagesize = getpagesize();
    ctx.ptr = mmap(NULL, PAGE_COUNT * ctx.pagesize,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(ctx.ptr != MAP_FAILED);
    for (i = 0; i < PAGE_COUNT; i++) {
        memcpy(&ctx.ptr[i * ctx.pagesize], nop_func, sizeof(nop_func));
    }
    ctx.dev_null_fd = open("/dev/null", O_WRONLY);
    assert(ctx.dev_null_fd >= 0);
    ctx.mutator_count = 2;

    /* Start threads. */
    ret = pthread_create(&threads[0], NULL, thread_read, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[1], NULL, thread_write, &ctx);
    assert(ret == 0);
    ret = pthread_create(&threads[2], NULL, thread_execute, &ctx);
    assert(ret == 0);
    for (i = 3; i <= 4; i++) {
        ret = pthread_create(&threads[i], NULL, thread_mutate, &ctx);
        assert(ret == 0);
    }

    /* Wait for threads to stop. */
    for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++) {
        ret = pthread_join(threads[i], NULL);
        assert(ret == 0);
    }

    /* Destroy memory chunk. */
    ret = close(ctx.dev_null_fd);
    assert(ret == 0);
    ret = munmap(ctx.ptr, PAGE_COUNT * ctx.pagesize);
    assert(ret == 0);

    return EXIT_SUCCESS;
}
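
/*
 * Build note (an assumption, not part of the original source): nop_func is
 * expected to be supplied by nop_func.h as position-independent machine
 * code for a function that simply returns, e.g. a single 0xc3 "ret" byte
 * on x86_64. With such a header in place, the test should build with a
 * plain "cc -O2 -pthread" and no other dependencies.
 */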