/* This is the Linux kernel elf-loading code, ported into user space */

#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>

#include "qemu.h"
#include "disas.h"

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE =  0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,      /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO =     0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC =  0x0400000,
    ADDR_LIMIT_32BIT =   0x0800000,
    SHORT_INODE =        0x1000000,
    WHOLE_SECONDS =      0x2000000,
    STICKY_TIMEOUTS =    0x4000000,
    ADDR_LIMIT_3GB =     0x8000000,
};

/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
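/*
 * For example (values derived from the definitions above): PER_SVR4 is
 * 0x0001 with STICKY_TIMEOUTS and MMAP_PAGE_ZERO or'ed into the upper bytes,
 * so personality(PER_SVR4) == 0x0001 and personality(PER_LINUX) == 0x0000.
 */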
/* This flag is ineffective under Linux too; it should be deleted. */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()
static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}
#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_I386 */
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
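/*
 * ELF_HWCAP is advertised to the guest through the AT_HWCAP entry of the
 * auxiliary vector built in create_elf_tables() below, so the guest libc
 * can select optimized routines for the features listed here.
 */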
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS  2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}
#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif /* TARGET_SPARC */
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC
/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    PPC_FEATURE_32 = 0x80000000,
    PPC_FEATURE_64 = 0x40000000,
    PPC_FEATURE_601_INSTR = 0x20000000,
    PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    PPC_FEATURE_HAS_FPU = 0x08000000,
    PPC_FEATURE_HAS_MMU = 0x04000000,
    PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    PPC_FEATURE_HAS_SPE = 0x00800000,
    PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    PPC_FEATURE_NO_TB = 0x00100000,
    PPC_FEATURE_POWER4 = 0x00080000,
    PPC_FEATURE_POWER5 = 0x00040000,
    PPC_FEATURE_POWER5_PLUS = 0x00020000,
    PPC_FEATURE_CELL = 0x00010000,
    PPC_FEATURE_BOOKE = 0x00008000,
    PPC_FEATURE_SMT = 0x00004000,
    PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    PPC_FEATURE_ARCH_2_05 = 0x00001000,
    PPC_FEATURE_PA6T = 0x00000800,
    PPC_FEATURE_HAS_DFP = 0x00000400,
    PPC_FEATURE_POWER6_EXT = 0x00000200,
    PPC_FEATURE_ARCH_2_06 = 0x00000100,
    PPC_FEATURE_HAS_VSX = 0x00000080,
    PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    PPC_FEATURE_TRUE_LE = 0x00000002,
    PPC_FEATURE_PPC_LE = 0x00000001,
};
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    CPUState *e = thread_env;
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (e->insns_flags & flag) { features |= feature; } } while (0)
    GET_FEATURE(PPC_64B, PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, PPC_FEATURE_HAS_4xxMAC);
#undef GET_FEATURE

    return features;
}
/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
    NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                     \
    /*                                                  \
     * Now handle glibc compatibility.                  \
     */                                                 \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
} while (0)
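/*
 * With the definitions above, a PPC guest therefore sees five extra auxv
 * entries appended by ARCH_DLINFO: AT_DCACHEBSIZE = 0x20, AT_ICACHEBSIZE =
 * 0x20, AT_UCACHEBSIZE = 0 and two AT_IGNOREPPC padding entries, which is
 * what DLINFO_ARCH_ITEMS accounts for in create_elf_tables() below.
 */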
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    _regs->gpr[4] = pos;
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
        get_user_ual(tmp, pos);
    _regs->gpr[5] = pos;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_PPC */
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_MIPS */
#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_XILINX_MICROBLAZE
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_MICROBLAZE */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_SH4 */
#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_CRIS */
#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif /* TARGET_M68K */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PARISC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_PARISC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_ulong stack = infop->start_stack;
    abi_ulong entry = infop->entry;

    regs->iaoq[0] = entry;
    regs->iaoq[1] = entry + 4;
    get_user_ual(regs->gr[25], stack);      /* argc */
    get_user_ual(regs->gr[24], stack + 4);  /* argv */
    regs->gr[30] = stack;
    regs->gr[31] = entry;
}
#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
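/*
 * For example, with a 4 KiB TARGET_PAGE_SIZE, a segment virtual address of
 * 0x08048123 gives TARGET_ELF_PAGESTART(0x08048123) == 0x08048000 and
 * TARGET_ELF_PAGEOFFSET(0x08048123) == 0x123; the loader maps from the page
 * boundary and compensates the file offset by the same page offset.
 */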
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
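/*
 * Roughly: the strings are laid down from the top of the argument page
 * array downwards, one backing page at a time; 'p' is the target offset of
 * the lowest byte written so far and is returned so the caller can keep
 * stacking further strings below it.
 */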
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
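/*
 * Example (assuming a 4 KiB page size): if the last file-backed byte of the
 * data segment ends at elf_bss == 0x0804a123, the bytes from 0x0804a123 up
 * to the end of that page (0x0804afff) are cleared here so stale file
 * contents never leak into the zero-initialized bss.
 */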
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)
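/*
 * Each NEW_AUX_ENT therefore pushes one auxv entry of two target words:
 * the id ends up at the lower address and the value directly above it.
 * Entries are emitted starting with AT_NULL, so the terminator sits at
 * the highest address and the vector reads in the usual order when
 * walked upwards from sp.
 */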
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
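/*
 * loader_build_argptr() finishes the layout below the auxv: it pushes the
 * environment pointer array, the argv pointer array and finally argc, so
 * the new sp points at argc with argv[], a NULL terminator, envp[], another
 * NULL and then the auxiliary vector above it, as the ELF ABI expects.
 */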
    return sp;
}


static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;
    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;
    if (key->st_value < sym->st_value) {
        result = -1;
    } else if (key->st_value > sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym key;
    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
/* FIXME: This should use elf_ops.h  */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
        bswap_shdr(&sechdr);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
            bswap_shdr(&strtab);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms)
        return;
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
        return;

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
        bswap_sym(syms + i);
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }
    syms = realloc(syms, nsyms * sizeof(*syms));

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}
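/*
 * Overview of load_elf_binary() below: it parses the ELF header and program
 * headers of the main binary, copies the filename/environment/argument
 * strings into the argument pages, reads a PT_INTERP interpreter path if one
 * is present, maps every PT_LOAD segment with target_mmap(), then loads the
 * interpreter (ELF or a.out), builds the stack and auxiliary vector with
 * create_elf_tables(), and finally records the memory layout in *info.
 */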
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free(elf_phdata);
        return -errno;
    }

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }
    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;
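/*
 * The fields recorded in *info (entry point, brk, code/data/stack bounds,
 * personality) are what the caller uses afterwards: the main loop sets up
 * the initial CPU state from them via do_init_thread() below.
 */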
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}