2 * Wine memory mappings support
4 * Copyright 2000, 2004 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include "wine/port.h"
29 #include <sys/types.h>
30 #ifdef HAVE_SYS_MMAN_H
40 #include "wine/library.h"
41 #include "wine/list.h"
50 static struct list reserved_areas
= LIST_INIT(reserved_areas
);
51 static const int granularity_mask
= 0xffff; /* reserved areas have 64k granularity */
54 #define MAP_NORESERVE 0
#ifndef HAVE_MMAP
/* stub for platforms without mmap(): there is never anything to unmap, so always succeed */
static inline int munmap( void *ptr, size_t size ) { return 0; }
#endif
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *             try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocateable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 *
 * Returns non-zero iff the mapping was placed exactly at 'addr'.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    /* 'volatile' because the child of vfork() writes it in the shared address space */
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize - 1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        size_t i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* vfork() lets the parent continue only after the child
       has exited.  Furthermore, Wine sets SIGCHLD to SIG_IGN,
       so we don't need to wait for the child. */

    return result == addr;
}
#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
 *		wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps: maps 'size' bytes of anonymous
 * memory with protection 'prot', preferably at 'start'.  Extra mmap
 * flags may be passed in 'flags'; MAP_SHARED is stripped and
 * MAP_PRIVATE forced, since the mapping is always private.
 * Returns the mapped address, or (void *)-1 on failure (like mmap).
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
    static int fdzero = -1;  /* lazily-opened /dev/zero fd for platforms without MAP_ANON */

#ifdef MAP_ANON
    flags |= MAP_ANON;
#else
    if (fdzero == -1)
    {
        if ((fdzero = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
#endif  /* MAP_ANON */

#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
#ifdef MAP_PRIVATE
    flags |= MAP_PRIVATE;
#endif

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__)
        if ( try_mmap_fixed( start, size, prot, flags, fdzero, 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, fdzero, 0 );
}
199 /***********************************************************************
202 * Reserve as much memory as possible in the given area.
203 * FIXME: probably needs a different algorithm for Solaris
205 static void reserve_area( void *addr
, void *end
)
208 size_t size
= (char *)end
- (char *)addr
;
212 if ((ptr
= wine_anon_mmap( addr
, size
, PROT_NONE
, MAP_NORESERVE
)) != (void *)-1)
216 wine_mmap_add_reserved_area( addr
, size
);
219 else munmap( ptr
, size
);
221 if (size
> granularity_mask
+ 1)
223 size_t new_size
= (size
/ 2) & ~granularity_mask
;
224 reserve_area( addr
, (char *)addr
+ new_size
);
225 reserve_area( (char *)addr
+ new_size
, end
);
230 /***********************************************************************
233 * Reserve the DOS area (0x00000000-0x00110000).
235 static void reserve_dos_area(void)
237 const size_t page_size
= getpagesize();
238 const size_t dos_area_size
= 0x110000;
241 /* first page has to be handled specially */
242 ptr
= wine_anon_mmap( (void *)page_size
, dos_area_size
- page_size
, PROT_NONE
, MAP_NORESERVE
);
243 if (ptr
!= (void *)page_size
)
245 if (ptr
!= (void *)-1) munmap( ptr
, dos_area_size
- page_size
);
248 /* now add first page with MAP_FIXED */
249 wine_anon_mmap( NULL
, page_size
, PROT_NONE
, MAP_NORESERVE
|MAP_FIXED
);
250 wine_mmap_add_reserved_area( NULL
, dos_area_size
);
254 /***********************************************************************
259 struct reserved_area
*area
;
261 #if defined(__i386__) && !defined(__FreeBSD__) /* commented out until FreeBSD gets fixed */
263 char * const stack_ptr
= &stack
;
264 char *user_space_limit
= (char *)0x80000000;
266 /* check for a reserved area starting at the user space limit */
267 /* to avoid wasting time trying to allocate it again */
268 LIST_FOR_EACH( ptr
, &reserved_areas
)
270 area
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
271 if ((char *)area
->base
> user_space_limit
) break;
272 if ((char *)area
->base
+ area
->size
> user_space_limit
)
274 user_space_limit
= (char *)area
->base
+ area
->size
;
279 if (stack_ptr
>= user_space_limit
)
281 char *base
= stack_ptr
- ((unsigned int)stack_ptr
& granularity_mask
) - (granularity_mask
+ 1);
282 if (base
> user_space_limit
) reserve_area( user_space_limit
, base
);
283 base
= stack_ptr
- ((unsigned int)stack_ptr
& granularity_mask
) + (granularity_mask
+ 1);
285 /* Linux heuristic: if the stack top is at c0000000, assume the address space */
286 /* ends there, this avoids a lot of futile allocation attempts */
287 if (base
!= (char *)0xc0000000)
289 reserve_area( base
, 0 );
291 else reserve_area( user_space_limit
, 0 );
292 #endif /* __i386__ */
294 /* reserve the DOS area if not already done */
296 ptr
= list_head( &reserved_areas
);
299 area
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
300 if (!area
->base
) return; /* already reserved */
305 #else /* HAVE_MMAP */
313 /***********************************************************************
314 * wine_mmap_add_reserved_area
316 * Add an address range to the list of reserved areas.
317 * Caller must have made sure the range is not used by anything else.
319 * Note: the reserved areas functions are not reentrant, caller is
320 * responsible for proper locking.
322 void wine_mmap_add_reserved_area( void *addr
, size_t size
)
324 struct reserved_area
*area
;
327 if (!((char *)addr
+ size
)) size
--; /* avoid wrap-around */
329 LIST_FOR_EACH( ptr
, &reserved_areas
)
331 area
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
332 if (area
->base
> addr
)
334 /* try to merge with the next one */
335 if ((char *)addr
+ size
== (char *)area
->base
)
343 else if ((char *)area
->base
+ area
->size
== (char *)addr
)
345 /* merge with the previous one */
348 /* try to merge with the next one too */
349 if ((ptr
= list_next( &reserved_areas
, ptr
)))
351 struct reserved_area
*next
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
352 if ((char *)addr
+ size
== (char *)next
->base
)
354 area
->size
+= next
->size
;
355 list_remove( &next
->entry
);
363 if ((area
= malloc( sizeof(*area
) )))
367 list_add_before( ptr
, &area
->entry
);
372 /***********************************************************************
373 * wine_mmap_remove_reserved_area
375 * Remove an address range from the list of reserved areas.
376 * If 'unmap' is non-zero the range is unmapped too.
378 * Note: the reserved areas functions are not reentrant, caller is
379 * responsible for proper locking.
381 void wine_mmap_remove_reserved_area( void *addr
, size_t size
, int unmap
)
383 struct reserved_area
*area
;
386 if (!((char *)addr
+ size
)) size
--; /* avoid wrap-around */
388 ptr
= list_head( &reserved_areas
);
389 /* find the first area covering address */
392 area
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
393 if ((char *)area
->base
>= (char *)addr
+ size
) break; /* outside the range */
394 if ((char *)area
->base
+ area
->size
> (char *)addr
) /* overlaps range */
396 if (area
->base
>= addr
)
398 if ((char *)area
->base
+ area
->size
> (char *)addr
+ size
)
400 /* range overlaps beginning of area only -> shrink area */
401 if (unmap
) munmap( area
->base
, (char *)addr
+ size
- (char *)area
->base
);
402 area
->size
-= (char *)addr
+ size
- (char *)area
->base
;
403 area
->base
= (char *)addr
+ size
;
408 /* range contains the whole area -> remove area completely */
409 ptr
= list_next( &reserved_areas
, ptr
);
410 if (unmap
) munmap( area
->base
, area
->size
);
411 list_remove( &area
->entry
);
418 if ((char *)area
->base
+ area
->size
> (char *)addr
+ size
)
420 /* range is in the middle of area -> split area in two */
421 struct reserved_area
*new_area
= malloc( sizeof(*new_area
) );
424 new_area
->base
= (char *)addr
+ size
;
425 new_area
->size
= (char *)area
->base
+ area
->size
- (char *)new_area
->base
;
426 list_add_after( ptr
, &new_area
->entry
);
428 else size
= (char *)area
->base
+ area
->size
- (char *)addr
;
429 area
->size
= (char *)addr
- (char *)area
->base
;
430 if (unmap
) munmap( addr
, size
);
435 /* range overlaps end of area only -> shrink area */
436 if (unmap
) munmap( addr
, (char *)area
->base
+ area
->size
- (char *)addr
);
437 area
->size
= (char *)addr
- (char *)area
->base
;
441 ptr
= list_next( &reserved_areas
, ptr
);
446 /***********************************************************************
447 * wine_mmap_is_in_reserved_area
449 * Check if the specified range is included in a reserved area.
450 * Returns 1 if range is fully included, 0 if range is not included
451 * at all, and -1 if it is only partially included.
453 * Note: the reserved areas functions are not reentrant, caller is
454 * responsible for proper locking.
456 int wine_mmap_is_in_reserved_area( void *addr
, size_t size
)
458 struct reserved_area
*area
;
461 LIST_FOR_EACH( ptr
, &reserved_areas
)
463 area
= LIST_ENTRY( ptr
, struct reserved_area
, entry
);
464 if (area
->base
> addr
) break;
465 if ((char *)area
->base
+ area
->size
<= (char *)addr
) continue;
466 /* area must contain block completely */
467 if ((char *)area
->base
+ area
->size
< (char *)addr
+ size
) return -1;