/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    unsigned int count;
    unsigned int max;
    struct range
    {
        file_pos_t  start;
        file_pos_t  end;
    } ranges[1];
};
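/* Note: struct ranges is allocated with room for a variable number of array
 * entries, e.g. mem_alloc( offsetof(struct ranges, ranges[8]) ), and grown
 * with realloc() when count reaches max (see add_committed_range). */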
struct mapping
{
    struct object   obj;             /* object header */
    mem_size_t      size;            /* mapping size */
    int             protect;         /* protection flags */
    struct fd      *fd;              /* fd for mapped file */
    int             header_size;     /* size of headers (for PE image mapping) */
    client_ptr_t    base;            /* default base addr (for PE image mapping) */
    struct ranges  *committed;       /* list of committed ranges in this mapping */
    struct file    *shared_file;     /* temp file for shared PE mapping */
    struct list     shared_entry;    /* entry in global shared PE mappings list */
};

static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),      /* size */
    mapping_dump,                /* dump */
    mapping_get_type,            /* get_type */
    no_add_queue,                /* add_queue */
    NULL,                        /* remove_queue */
    NULL,                        /* signaled */
    NULL,                        /* satisfied */
    no_signal,                   /* signal */
    mapping_get_fd,              /* get_fd */
    mapping_map_access,          /* map_access */
    default_get_sd,              /* get_sd */
    default_set_sd,              /* set_sd */
    no_lookup_name,              /* lookup_name */
    no_open_file,                /* open_file */
    fd_close_handle,             /* close_handle */
    mapping_destroy              /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,   /* get_poll_events */
    default_poll_event,           /* poll_event */
    no_flush,                     /* flush */
    mapping_get_fd_type,          /* get_fd_type */
    no_fd_ioctl,                  /* ioctl */
    no_fd_queue_async,            /* queue_async */
    default_fd_reselect_async,    /* reselect_async */
    default_fd_cancel_async       /* cancel_async */
};
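/* global list of PE mappings that have a temp file for shared sections */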
static struct list shared_list = LIST_INIT(shared_list);

#ifdef __i386__

/* These are always the same on an i386, and it will be faster this way */
# define page_mask  0xfff
# define page_shift 12
# define init_page_size() do { /* nothing */ } while(0)

#else  /* __i386__ */

static int page_shift, page_mask;

static void init_page_size(void)
{
    int page_size;
# ifdef HAVE_GETPAGESIZE
    page_size = getpagesize();
# else
#  ifdef __svr4__
    page_size = sysconf(_SC_PAGESIZE);
#  else
#   error Cannot get the page size on this platform
#  endif
# endif
    page_mask = page_size - 1;
    /* Make sure we have a power of 2 */
    assert( !(page_size & page_mask) );
    page_shift = 0;
    while ((1 << page_shift) != page_size) page_shift++;
}
#endif  /* __i386__ */
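/* round a size up to the next page boundary */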
#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)


/* extend a file beyond the current end of file */
static int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        ftruncate( unix_fd, size );
        return 1;
    }
    file_set_error();
    return 0;
}

/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    char tmpfn[16];
    int fd;

    sprintf( tmpfn, "anonmap.XXXXXX" );  /* create it in the server directory */
    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();
    return fd;
}

/* find the shared PE mapping for a given mapping */
static struct file *get_shared_file( struct mapping *mapping )
{
    struct mapping *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_list, struct mapping, shared_entry )
        if (is_same_file_fd( ptr->fd, mapping->fd ))
            return (struct file *)grab_object( ptr->shared_file );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}

/* add a range to the committed list */
static void add_committed_range( struct mapping *mapping, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct range *ranges;

    if (!mapping->committed) return;  /* everything committed already */

    for (i = 0, ranges = mapping->committed->ranges; i < mapping->committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < mapping->committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (mapping->committed->count - j) * sizeof(*ranges) );
                mapping->committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (mapping->committed->count == mapping->committed->max)
    {
        unsigned int new_size = mapping->committed->max * 2;
        struct ranges *new_ptr = realloc( mapping->committed, offsetof( struct ranges, ranges[new_size] ));
        if (!new_ptr) return;
        new_ptr->max = new_size;
        ranges = new_ptr->ranges;
        mapping->committed = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (mapping->committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    mapping->committed->count++;
}

/* find the range containing start and return whether it's committed */
static int find_committed_range( struct mapping *mapping, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct range *ranges;

    if (!mapping->committed)  /* everything is committed */
    {
        *size = mapping->size - start;
        return 1;
    }
    for (i = 0, ranges = mapping->committed->ranges; i < mapping->committed->count; i++)
    {
        if (ranges[i].start > start)
        {
            *size = ranges[i].start - start;
            return 0;
        }
        if (ranges[i].end > start)
        {
            *size = ranges[i].end - start;
            return 1;
        }
    }
    *size = mapping->size - start;
    return 0;
}
/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared_file = get_shared_file( mapping ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(mapping->shared_file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 )))
        return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;
        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }
    free( buffer );
    return 1;

 error:
    release_object( mapping->shared_file );
    mapping->shared_file = NULL;
    free( buffer );
    return 0;
}

/* retrieve the mapping parameters for an executable (PE) image */
static int get_image_params( struct mapping *mapping, int unix_fd )
{
    IMAGE_DOS_HEADER dos;
    IMAGE_SECTION_HEADER *sec = NULL;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size;

    /* load the headers */

    if (pread( unix_fd, &dos, sizeof(dos), 0 ) != sizeof(dos)) goto error;
    if (dos.e_magic != IMAGE_DOS_SIGNATURE) goto error;
    pos = dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) goto error;
    /* zero out the Optional header in case it's not present or partial */
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE) goto error;

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        mapping->size = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->base = nt.opt.hdr32.ImageBase;
        mapping->header_size = nt.opt.hdr32.SizeOfHeaders;
        break;
    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        mapping->size = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->base = nt.opt.hdr64.ImageBase;
        mapping->header_size = nt.opt.hdr64.SizeOfHeaders;
        break;
    default:
        goto error;
    }

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (pos + size > mapping->size) goto error;
    if (pos + size > mapping->header_size) mapping->header_size = pos + size;
    if (!(sec = malloc( size ))) goto error;
    if (pread( unix_fd, sec, size, pos ) != size) goto error;

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections )) goto error;

    if (mapping->shared_file) list_add_head( &shared_list, &mapping->shared_entry );

    mapping->protect = VPROT_IMAGE;
    free( sec );
    return 1;

 error:
    free( sec );
    set_error( STATUS_INVALID_FILE_FOR_SECTION );
    return 0;
}
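/* create a mapping object, backed either by a client file handle or by an anonymous temp file */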
static struct object *create_mapping( struct directory *root, const struct unicode_str *name,
                                      unsigned int attr, mem_size_t size, int protect,
                                      obj_handle_t handle, const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int access = 0;
    int unix_fd;
    struct stat st;

    if (!page_mask) init_page_size();

    if (!(mapping = create_named_object_dir( root, name, attr, &mapping_ops )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return &mapping->obj;  /* Nothing else to do */

    if (sd) default_set_sd( &mapping->obj, sd, OWNER_SECURITY_INFORMATION|
                                               GROUP_SECURITY_INFORMATION|
                                               DACL_SECURITY_INFORMATION|
                                               SACL_SECURITY_INFORMATION );
    mapping->header_size = 0;
    mapping->base        = 0;
    mapping->fd          = NULL;
    mapping->shared_file = NULL;
    mapping->committed   = NULL;

    if (protect & VPROT_READ) access |= FILE_READ_DATA;
    if (protect & VPROT_WRITE) access |= FILE_WRITE_DATA;

    if (handle)
    {
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(protect & VPROT_COMMITTED))
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if (!(file = get_file_obj( current->process, handle, access ))) goto error;
        fd = get_obj_fd( (struct object *)file );
        /* file sharing rules for mappings are different so we use magic access rights */
        if (protect & VPROT_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (protect & VPROT_WRITE) mapping_access |= FILE_MAPPING_WRITE;
        mapping->fd = dup_fd_object( fd, mapping_access,
                                     FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                                     FILE_SYNCHRONOUS_IO_NONALERT );
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        set_fd_user( mapping->fd, &mapping_fd_ops, &mapping->obj );
        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (protect & VPROT_IMAGE)
        {
            if (!get_image_params( mapping, unix_fd )) goto error;
            return &mapping->obj;
        }
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (!size)
        {
            if (!(size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < size && !grow_file( unix_fd, size )) goto error;
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!size || (protect & VPROT_IMAGE))
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if (!(protect & VPROT_COMMITTED))
        {
            if (!(mapping->committed = mem_alloc( offsetof(struct ranges, ranges[8]) ))) goto error;
            mapping->committed->count = 0;
            mapping->committed->max   = 8;
        }
        if ((unix_fd = create_temp_file( size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    mapping->size = (size + page_mask) & ~((mem_size_t)page_mask);
    mapping->protect = protect;
    return &mapping->obj;

 error:
    release_object( mapping );
    return NULL;
}
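/* dump the contents of a mapping object (for debugging) */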
static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x prot=%08x fd=%p header_size=%08x base=%08lx "
             "shared_file=%p ",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->protect, mapping->fd, mapping->header_size,
             (unsigned long)mapping->base, mapping->shared_file );
    dump_object_name( &mapping->obj );
    fputc( '\n', stderr );
}
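/* return the object type (Section) for mapping objects */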
static struct object_type *mapping_get_type( struct object *obj )
{
    static const WCHAR name[] = {'S','e','c','t','i','o','n'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
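/* return the fd of the file backing the mapping */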
static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}
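/* map generic access rights to section-specific access rights */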
static unsigned int mapping_map_access( struct object *obj, unsigned int access )
{
    if (access & GENERIC_READ)    access |= STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ;
    if (access & GENERIC_WRITE)   access |= STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE;
    if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE;
    if (access & GENERIC_ALL)     access |= SECTION_ALL_ACCESS;
    return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
}
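/* release the resources owned by a mapping: backing fd, shared-section file and committed ranges */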
static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->shared_file)
    {
        release_object( mapping->shared_file );
        list_remove( &mapping->shared_entry );
    }
    free( mapping->committed );
}
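/* mappings are always backed by regular files */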
static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}
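/* return the system page size, initializing it on first use */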
int get_page_size(void)
{
    if (!page_mask) init_page_size();
    return page_mask + 1;
}
/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *obj;
    struct unicode_str name;
    struct directory *root = NULL;
    const struct object_attributes *objattr = get_req_data();
    const struct security_descriptor *sd;

    reply->handle = 0;

    if (!objattr_is_valid( objattr, get_req_data_size() ))
        return;

    sd = objattr->sd_len ? (const struct security_descriptor *)(objattr + 1) : NULL;
    objattr_get_name( objattr, &name );

    if (objattr->rootdir && !(root = get_directory_obj( current->process, objattr->rootdir, 0 )))
        return;

    if ((obj = create_mapping( root, &name, req->attributes, req->size, req->protect, req->file_handle, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, obj, req->access, req->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, obj, req->access, req->attributes );
        release_object( obj );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name;
    struct directory *root = NULL;
    struct mapping *mapping;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
        return;

    if ((mapping = open_object_dir( root, &name, req->attributes, &mapping_ops )))
    {
        reply->handle = alloc_handle( current->process, &mapping->obj, req->access, req->attributes );
        release_object( mapping );
    }

    if (root) release_object( root );
}
/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;
    struct fd *fd;

    if ((mapping = (struct mapping *)get_handle_obj( current->process, req->handle,
                                                     req->access, &mapping_ops )))
    {
        reply->size        = mapping->size;
        reply->protect     = mapping->protect;
        reply->header_size = mapping->header_size;
        reply->base        = mapping->base;
        reply->shared_file = 0;
        if ((fd = get_obj_fd( &mapping->obj )))
        {
            if (!is_fd_removable(fd))
                reply->mapping = alloc_handle( current->process, mapping, 0, 0 );
            release_object( fd );
        }
        if (mapping->shared_file)
        {
            if (!(reply->shared_file = alloc_handle( current->process, mapping->shared_file,
                                                     GENERIC_READ|GENERIC_WRITE, 0 )))
            {
                if (reply->mapping) close_handle( current->process, reply->mapping );
            }
        }
        release_object( mapping );
    }
}
/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct mapping *mapping;

    if ((mapping = (struct mapping *)get_handle_obj( current->process, req->handle, 0, &mapping_ops )))
    {
        if (!(req->offset & page_mask) && req->offset < mapping->size)
            reply->committed = find_committed_range( mapping, req->offset, &reply->size );
        else
            set_error( STATUS_INVALID_PARAMETER );

        release_object( mapping );
    }
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct mapping *mapping;

    if ((mapping = (struct mapping *)get_handle_obj( current->process, req->handle, 0, &mapping_ops )))
    {
        if (!(req->size & page_mask) &&
            !(req->offset & page_mask) &&
            req->offset < mapping->size &&
            req->size > 0 &&
            req->size <= mapping->size - req->offset)
            add_committed_range( mapping, req->offset, req->offset + req->size );
        else
            set_error( STATUS_INVALID_PARAMETER );

        release_object( mapping );
    }
}