/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file *);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
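/*
 * Worked example (illustrative values only, assuming ELF_MIN_ALIGN == 4096):
 * ELF_PAGESTART(0x08048123)  == 0x08048000  (round down to page start)
 * ELF_PAGEOFFSET(0x08048123) == 0x123       (offset within the page)
 * ELF_PAGEALIGN(0x08048123)  == 0x08049000  (round up to the next boundary)
 */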
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr = do_brk(start, end - start);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static void padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		clear_user((void __user *) elf_bss, nbyte);
	}
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
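/*
 * Sketch of the usual grows-down case (illustrative values only): with
 * sp == 0xbffffe20, STACK_ALLOC(sp, 5) moves sp down to 0xbffffe1b and
 * yields that address, so successive allocations march toward lower
 * addresses; STACK_ROUND() then drops the result to a 16-byte boundary,
 * as most ABIs require for the initial stack pointer.
 */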
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */

	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

#ifdef CONFIG_X86_HT
		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 *
		 * The conditionals here are unneeded, but kept in to make the
		 * code behaviour the same as pre change unless we have
		 * hyperthreaded processors. This should be cleaned up
		 * before 2.6
		 */
		if (smp_num_siblings > 1)
			STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		__copy_to_user(u_platform, k_platform, len);
	}
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
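	/*
	 * Illustration: each NEW_AUX_ENT consumes two consecutive elf_addr_t
	 * slots, the tag followed by its value; e.g.
	 * NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE) stores AT_PAGESZ in
	 * elf_info[ei_index] and the page size in elf_info[ei_index + 1].
	 * The vector is later terminated by an all-zero AT_NULL pair.
	 */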
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);
	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}
	/* Populate argv and envp */
	p = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, argv);
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, envp);
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
	return 0;
}
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;

	down_write(&current->mm->mmap_sem);
	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
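/*
 * Worked example (illustrative values, 4096-byte pages): for a segment
 * with p_vaddr == 0x08048100, p_offset == 0x100 and p_filesz == 0x4000,
 * the map starts at ELF_PAGESTART(0x08048100) == 0x08048000, spans
 * 0x4000 + 0x100 bytes, and uses file offset 0x100 - 0x100 == 0.  Since
 * ELF requires p_vaddr and p_offset to be congruent modulo the page
 * size, the byte at p_offset lands exactly at p_vaddr.
 */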
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;
	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
	  if (eppnt->p_type == PT_LOAD) {
	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
	    int elf_prot = 0;
	    unsigned long vaddr = 0;
	    unsigned long k, map_addr;

	    if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
	    vaddr = eppnt->p_vaddr;
	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
		elf_type |= MAP_FIXED;

	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
	    error = map_addr;
	    if (BAD_ADDR(map_addr))
		goto out_close;

	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
		load_addr = map_addr - ELF_PAGESTART(vaddr);
		load_addr_set = 1;
	    }
	    /*
	     * Check to see if the section's size will overflow the
	     * allowed task size. Note that p_filesz must always be
	     * <= p_memsz so it is only necessary to check p_memsz.
	     */
	    k = load_addr + eppnt->p_vaddr;
	    if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
		eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
		error = -ENOMEM;
		goto out_close;
	    }

	    /*
	     * Find the end of the file mapping for this phdr, and keep
	     * track of the largest address we see for this.
	     */
	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
	    if (k > elf_bss)
		elf_bss = k;

	    /*
	     * Do the same thing for the memory mapping - between
	     * elf_bss and last_bss is the bss section.
	     */
	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
	    if (k > last_bss)
		last_bss = k;
	  }
	}
	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		error = do_brk(elf_bss, last_bss - elf_bss);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec * interp_ex,
		struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	do_brk(0, text_data);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
	                   (unsigned long)addr + text_data);

	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;
	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;
	/* Now read in all of the header information */

	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}
	files = current->files;		/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX)
				goto out_free_file;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval < 0)
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;
	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
		current->personality |= READ_IMPLIES_EXEC;

	arch_pick_mmap_layout(current->mm);
	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	current->mm->free_area_cache = current->mm->mmap_base;
	retval = setup_arg_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;
		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void __user *) elf_bss + load_bias, nbyte);
			}
		}
		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error))
			continue;
		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
= elf_ppnt
->p_vaddr
;
793 if (k
< start_code
) start_code
= k
;
794 if (start_data
< k
) start_data
= k
;
		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}
		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	padzero(elf_bss);
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
	}
	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;
	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;
	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD) j++;
	if (j != 1)
		goto out_free_ph;

	while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(elf_phdata->p_vaddr),
			(elf_phdata->p_filesz +
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(elf_phdata->p_offset -
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
		goto out_free_ph;
	elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
	padzero(elf_bss);

	len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
	if (bss > len)
		do_brk(len, bss - len);
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */

#ifdef USE_ELF_CORE_DUMP
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, off_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;
#if 1
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
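/*
 * For example, roundup(5, 4) == 8 and roundup(8, 4) == 8; the core
 * dumper below uses this to pad note names and descriptors to the
 * 4-byte alignment the ELF note format requires.
 */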
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = 0;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (p->pid == p->tgid) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		jiffies_to_timeval(p->utime + p->signal->utime,
				   &prstatus->pr_utime);
		jiffies_to_timeval(p->stime + p->signal->stime,
				   &prstatus->pr_stime);
	} else {
		jiffies_to_timeval(p->utime, &prstatus->pr_utime);
		jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	}
	jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
			struct mm_struct *mm)
{
	int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	copy_from_user(&psinfo->pr_psargs,
		       (const char __user *)mm->arg_start, len);
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);
	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	fill_elf_header(elf, segs+1);	/* including notes section */
	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;
	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);
	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);
	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}
	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;
	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}
#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif
	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}
end_coredump:
	set_fs(fs);

cleanup:
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");