1 /*
2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/fs.h>
15 #include <linux/mm.h>
16 #include <linux/mman.h>
17 #include <linux/errno.h>
18 #include <linux/signal.h>
19 #include <linux/binfmts.h>
20 #include <linux/string.h>
21 #include <linux/file.h>
22 #include <linux/slab.h>
23 #include <linux/personality.h>
24 #include <linux/elfcore.h>
25 #include <linux/init.h>
26 #include <linux/highuid.h>
27 #include <linux/compiler.h>
28 #include <linux/highmem.h>
29 #include <linux/pagemap.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/random.h>
33 #include <linux/elf.h>
34 #include <linux/elf-randomize.h>
35 #include <linux/utsname.h>
36 #include <linux/coredump.h>
37 #include <linux/sched.h>
38 #include <linux/dax.h>
39 #include <asm/uaccess.h>
40 #include <asm/param.h>
41 #include <asm/page.h>
43 #ifndef user_long_t
44 #define user_long_t long
45 #endif
46 #ifndef user_siginfo_t
47 #define user_siginfo_t siginfo_t
48 #endif
50 static int load_elf_binary(struct linux_binprm *bprm);
51 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
52 int, int, unsigned long);
54 #ifdef CONFIG_USELIB
55 static int load_elf_library(struct file *);
56 #else
57 #define load_elf_library NULL
58 #endif
61 * If we don't support core dumping, then supply a NULL so we
62 * don't even try.
64 #ifdef CONFIG_ELF_CORE
65 static int elf_core_dump(struct coredump_params *cprm);
66 #else
67 #define elf_core_dump NULL
68 #endif
70 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
71 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
72 #else
73 #define ELF_MIN_ALIGN PAGE_SIZE
74 #endif
76 #ifndef ELF_CORE_EFLAGS
77 #define ELF_CORE_EFLAGS 0
78 #endif
80 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
81 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
82 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
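/*
 * Worked example for the three macros above, assuming ELF_MIN_ALIGN == 4096
 * (0x1000), the common case where ELF_EXEC_PAGESIZE equals PAGE_SIZE:
 *
 *   ELF_PAGESTART(0x08049f2c)  == 0x08049000   (round down to page start)
 *   ELF_PAGEOFFSET(0x08049f2c) == 0x00000f2c   (offset within the page)
 *   ELF_PAGEALIGN(0x08049f2c)  == 0x0804a000   (round up to the next page)
 *
 * padzero() and elf_map() below rely on exactly this arithmetic when they
 * align segment addresses and zero the tail of a partially-filled page.
 */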
84 static struct linux_binfmt elf_format = {
85 .module = THIS_MODULE,
86 .load_binary = load_elf_binary,
87 .load_shlib = load_elf_library,
88 .core_dump = elf_core_dump,
89 .min_coredump = ELF_EXEC_PAGESIZE,
92 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
94 static int set_brk(unsigned long start, unsigned long end)
96 start = ELF_PAGEALIGN(start);
97 end = ELF_PAGEALIGN(end);
98 if (end > start) {
99 unsigned long addr;
100 addr = vm_brk(start, end - start);
101 if (BAD_ADDR(addr))
102 return addr;
104 current->mm->start_brk = current->mm->brk = end;
105 return 0;
108 /* We need to explicitly zero any fractional pages
109 after the data section (i.e. bss). This would
110 contain the junk from the file that should not
111 be in memory
113 static int padzero(unsigned long elf_bss)
115 unsigned long nbyte;
117 nbyte = ELF_PAGEOFFSET(elf_bss);
118 if (nbyte) {
119 nbyte = ELF_MIN_ALIGN - nbyte;
120 if (clear_user((void __user *) elf_bss, nbyte))
121 return -EFAULT;
123 return 0;
126 /* Let's use some macros to make this stack manipulation a little clearer */
127 #ifdef CONFIG_STACK_GROWSUP
128 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
129 #define STACK_ROUND(sp, items) \
130 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
131 #define STACK_ALLOC(sp, len) ({ \
132 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
133 old_sp; })
134 #else
135 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
136 #define STACK_ROUND(sp, items) \
137 (((unsigned long) (sp - items)) &~ 15UL)
138 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
139 #endif
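/*
 * Illustrative use of the stack helpers above on a downward-growing stack
 * (the !CONFIG_STACK_GROWSUP branch), with an assumed sp of 0x7fffffffe000:
 *
 *   STACK_ALLOC(sp, 16)  moves sp to 0x7fffffffdff0 and returns it
 *                        (16 bytes reserved, e.g. for the AT_RANDOM bytes)
 *   STACK_ADD(sp, n)     returns sp minus n pointer-sized slots
 *   STACK_ROUND(sp, n)   does the same but rounds the result down to a
 *                        16-byte boundary, as the ABI requires
 *
 * create_elf_tables() below uses these to reserve room for the platform
 * strings, the random bytes and the argv/envp/auxv pointer arrays.
 */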
141 #ifndef ELF_BASE_PLATFORM
143 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
144 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
145 * will be copied to the user stack in the same manner as AT_PLATFORM.
147 #define ELF_BASE_PLATFORM NULL
148 #endif
150 static int
151 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
152 unsigned long load_addr, unsigned long interp_load_addr)
154 unsigned long p = bprm->p;
155 int argc = bprm->argc;
156 int envc = bprm->envc;
157 elf_addr_t __user *argv;
158 elf_addr_t __user *envp;
159 elf_addr_t __user *sp;
160 elf_addr_t __user *u_platform;
161 elf_addr_t __user *u_base_platform;
162 elf_addr_t __user *u_rand_bytes;
163 const char *k_platform = ELF_PLATFORM;
164 const char *k_base_platform = ELF_BASE_PLATFORM;
165 unsigned char k_rand_bytes[16];
166 int items;
167 elf_addr_t *elf_info;
168 int ei_index = 0;
169 const struct cred *cred = current_cred();
170 struct vm_area_struct *vma;
173 * In some cases (e.g. Hyper-Threading), we want to avoid L1
174 * evictions by the processes running on the same package. One
175 * thing we can do is to shuffle the initial stack for them.
178 p = arch_align_stack(p);
181 * If this architecture has a platform capability string, copy it
182 * to userspace. In some cases (Sparc), this info is impossible
183 * for userspace to get any other way, in others (i386) it is
184 * merely difficult.
186 u_platform = NULL;
187 if (k_platform) {
188 size_t len = strlen(k_platform) + 1;
190 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
191 if (__copy_to_user(u_platform, k_platform, len))
192 return -EFAULT;
196 * If this architecture has a "base" platform capability
197 * string, copy it to userspace.
199 u_base_platform = NULL;
200 if (k_base_platform) {
201 size_t len = strlen(k_base_platform) + 1;
203 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
204 if (__copy_to_user(u_base_platform, k_base_platform, len))
205 return -EFAULT;
209 * Generate 16 random bytes for userspace PRNG seeding.
211 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
212 u_rand_bytes = (elf_addr_t __user *)
213 STACK_ALLOC(p, sizeof(k_rand_bytes));
214 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
215 return -EFAULT;
217 /* Create the ELF interpreter info */
218 elf_info = (elf_addr_t *)current->mm->saved_auxv;
219 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
220 #define NEW_AUX_ENT(id, val) \
221 do { \
222 elf_info[ei_index++] = id; \
223 elf_info[ei_index++] = val; \
224 } while (0)
226 #ifdef ARCH_DLINFO
228 * ARCH_DLINFO must come first so PPC can do its special alignment of
229 * AUXV.
230 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
231 * ARCH_DLINFO changes
233 ARCH_DLINFO;
234 #endif
235 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
236 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
237 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
238 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
239 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
240 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
241 NEW_AUX_ENT(AT_BASE, interp_load_addr);
242 NEW_AUX_ENT(AT_FLAGS, 0);
243 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
244 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
245 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
246 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
247 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
248 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
249 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
250 #ifdef ELF_HWCAP2
251 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
252 #endif
253 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
254 if (k_platform) {
255 NEW_AUX_ENT(AT_PLATFORM,
256 (elf_addr_t)(unsigned long)u_platform);
258 if (k_base_platform) {
259 NEW_AUX_ENT(AT_BASE_PLATFORM,
260 (elf_addr_t)(unsigned long)u_base_platform);
262 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
263 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
265 #undef NEW_AUX_ENT
266 /* AT_NULL is zero; clear the rest too */
267 memset(&elf_info[ei_index], 0,
268 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
270 /* And advance past the AT_NULL entry. */
271 ei_index += 2;
273 sp = STACK_ADD(p, ei_index);
275 items = (argc + 1) + (envc + 1) + 1;
276 bprm->p = STACK_ROUND(sp, items);
278 /* Point sp at the lowest address on the stack */
279 #ifdef CONFIG_STACK_GROWSUP
280 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
281 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
282 #else
283 sp = (elf_addr_t __user *)bprm->p;
284 #endif
288 * Grow the stack manually; some architectures have a limit on how
289 * far ahead a user-space access may be in order to grow the stack.
291 vma = find_extend_vma(current->mm, bprm->p);
292 if (!vma)
293 return -EFAULT;
295 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
296 if (__put_user(argc, sp++))
297 return -EFAULT;
298 argv = sp;
299 envp = argv + argc + 1;
301 /* Populate argv and envp */
302 p = current->mm->arg_end = current->mm->arg_start;
303 while (argc-- > 0) {
304 size_t len;
305 if (__put_user((elf_addr_t)p, argv++))
306 return -EFAULT;
307 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
308 if (!len || len > MAX_ARG_STRLEN)
309 return -EINVAL;
310 p += len;
312 if (__put_user(0, argv))
313 return -EFAULT;
314 current->mm->arg_end = current->mm->env_start = p;
315 while (envc-- > 0) {
316 size_t len;
317 if (__put_user((elf_addr_t)p, envp++))
318 return -EFAULT;
319 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
320 if (!len || len > MAX_ARG_STRLEN)
321 return -EINVAL;
322 p += len;
324 if (__put_user(0, envp))
325 return -EFAULT;
326 current->mm->env_end = p;
328 /* Put the elf_info on the stack in the right place. */
329 sp = (elf_addr_t __user *)envp + 1;
330 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
331 return -EFAULT;
332 return 0;
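/*
 * Sketch of the initial user stack that create_elf_tables() leaves behind,
 * lowest address first, one pointer-sized slot per entry:
 *
 *   argc
 *   argv[0] ... argv[argc-1], NULL
 *   envp[0] ... envp[envc-1], NULL
 *   auxv pairs (AT_* id, value) ..., terminated by an AT_NULL pair
 *   (higher up: AT_RANDOM bytes, platform strings, argument/environment
 *    strings copied earlier by the exec machinery)
 *
 * The dynamic linker and the C library startup code walk this layout to
 * locate the environment and the auxiliary vector.
 */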
335 #ifndef elf_map
337 static unsigned long elf_map(struct file *filep, unsigned long addr,
338 struct elf_phdr *eppnt, int prot, int type,
339 unsigned long total_size)
341 unsigned long map_addr;
342 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
343 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
344 addr = ELF_PAGESTART(addr);
345 size = ELF_PAGEALIGN(size);
347 /* mmap() will return -EINVAL if given a zero size, but a
348 * segment with zero filesize is perfectly valid */
349 if (!size)
350 return addr;
353 * total_size is the size of the ELF (interpreter) image.
354 * The _first_ mmap needs to know the full size, otherwise
355 * randomization might put this image into an overlapping
356 * position with the ELF binary image. (since size < total_size)
357 * So we first map the 'big' image - and unmap the remainder at
358 * the end. (this unmap is needed for ELF images with holes.)
360 if (total_size) {
361 total_size = ELF_PAGEALIGN(total_size);
362 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
363 if (!BAD_ADDR(map_addr))
364 vm_munmap(map_addr+size, total_size-size);
365 } else
366 map_addr = vm_mmap(filep, addr, size, prot, type, off);
368 return(map_addr);
371 #endif /* !elf_map */
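/*
 * Numeric sketch of the total_size path in elf_map() above, assuming the
 * first PT_LOAD rounds to size == 0x1000 and total_size rounds to 0x5000:
 * vm_mmap() maps the whole 0x5000 span once, so a randomized base cannot
 * collide with the later segments, then vm_munmap(map_addr + 0x1000, 0x4000)
 * trims everything past the first segment; the remaining segments are later
 * mapped MAP_FIXED into the hole that was just opened.
 */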
373 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
375 int i, first_idx = -1, last_idx = -1;
377 for (i = 0; i < nr; i++) {
378 if (cmds[i].p_type == PT_LOAD) {
379 last_idx = i;
380 if (first_idx == -1)
381 first_idx = i;
384 if (first_idx == -1)
385 return 0;
387 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
388 ELF_PAGESTART(cmds[first_idx].p_vaddr);
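/*
 * Example, assuming two PT_LOAD entries: the first with p_vaddr 0x400000,
 * the last with p_vaddr 0x600000 and p_memsz 0x800. total_mapping_size()
 * returns 0x600800 - ELF_PAGESTART(0x400000) = 0x200800, i.e. the span
 * from the first load address to the end of the last loadable segment.
 */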
392 * load_elf_phdrs() - load ELF program headers
393 * @elf_ex: ELF header of the binary whose program headers should be loaded
394 * @elf_file: the opened ELF binary file
396 * Loads ELF program headers from the binary file elf_file, which has the ELF
397 * header pointed to by elf_ex, into a newly allocated array. The caller is
398 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
400 static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
401 struct file *elf_file)
403 struct elf_phdr *elf_phdata = NULL;
404 int retval, size, err = -1;
407 * If the size of this structure has changed, then punt, since
408 * we will be doing the wrong thing.
410 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
411 goto out;
413 /* Sanity check the number of program headers... */
414 if (elf_ex->e_phnum < 1 ||
415 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
416 goto out;
418 /* ...and their total size. */
419 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
420 if (size > ELF_MIN_ALIGN)
421 goto out;
423 elf_phdata = kmalloc(size, GFP_KERNEL);
424 if (!elf_phdata)
425 goto out;
427 /* Read in the program headers */
428 retval = kernel_read(elf_file, elf_ex->e_phoff,
429 (char *)elf_phdata, size);
430 if (retval != size) {
431 err = (retval < 0) ? retval : -EIO;
432 goto out;
435 /* Success! */
436 err = 0;
437 out:
438 if (err) {
439 kfree(elf_phdata);
440 elf_phdata = NULL;
442 return elf_phdata;
445 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
448 * struct arch_elf_state - arch-specific ELF loading state
450 * This structure is used to preserve architecture specific data during
451 * the loading of an ELF file, throughout the checking of architecture
452 * specific ELF headers & through to the point where the ELF load is
453 * known to be proceeding (ie. SET_PERSONALITY).
455 * This implementation is a dummy for architectures which require no
456 * specific state.
458 struct arch_elf_state {
461 #define INIT_ARCH_ELF_STATE {}
464 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
465 * @ehdr: The main ELF header
466 * @phdr: The program header to check
467 * @elf: The open ELF file
468 * @is_interp: True if the phdr is from the interpreter of the ELF being
469 * loaded, else false.
470 * @state: Architecture-specific state preserved throughout the process
471 * of loading the ELF.
473 * Inspects the program header phdr to validate its correctness and/or
474 * suitability for the system. Called once per ELF program header in the
475 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
476 * interpreter.
478 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
479 * with that return code.
481 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
482 struct elf_phdr *phdr,
483 struct file *elf, bool is_interp,
484 struct arch_elf_state *state)
486 /* Dummy implementation, always proceed */
487 return 0;
491 * arch_check_elf() - check an ELF executable
492 * @ehdr: The main ELF header
493 * @has_interp: True if the ELF has an interpreter, else false.
494 * @state: Architecture-specific state preserved throughout the process
495 * of loading the ELF.
497 * Provides a final opportunity for architecture code to reject the loading
498 * of the ELF & cause an exec syscall to return an error. This is called after
499 * all program headers to be checked by arch_elf_pt_proc have been.
501 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
502 * with that return code.
504 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
505 struct arch_elf_state *state)
507 /* Dummy implementation, always proceed */
508 return 0;
511 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
513 /* This is much more generalized than the library routine read function,
514 so we keep this separate. Technically the library read function
515 is only provided so that we can read a.out libraries that have
516 an ELF header */
518 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
519 struct file *interpreter, unsigned long *interp_map_addr,
520 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
522 struct elf_phdr *eppnt;
523 unsigned long load_addr = 0;
524 int load_addr_set = 0;
525 unsigned long last_bss = 0, elf_bss = 0;
526 unsigned long error = ~0UL;
527 unsigned long total_size;
528 int i;
530 /* First of all, some simple consistency checks */
531 if (interp_elf_ex->e_type != ET_EXEC &&
532 interp_elf_ex->e_type != ET_DYN)
533 goto out;
534 if (!elf_check_arch(interp_elf_ex))
535 goto out;
536 if (!interpreter->f_op->mmap)
537 goto out;
539 total_size = total_mapping_size(interp_elf_phdata,
540 interp_elf_ex->e_phnum);
541 if (!total_size) {
542 error = -EINVAL;
543 goto out;
546 eppnt = interp_elf_phdata;
547 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
548 if (eppnt->p_type == PT_LOAD) {
549 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
550 int elf_prot = 0;
551 unsigned long vaddr = 0;
552 unsigned long k, map_addr;
554 if (eppnt->p_flags & PF_R)
555 elf_prot = PROT_READ;
556 if (eppnt->p_flags & PF_W)
557 elf_prot |= PROT_WRITE;
558 if (eppnt->p_flags & PF_X)
559 elf_prot |= PROT_EXEC;
560 vaddr = eppnt->p_vaddr;
561 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
562 elf_type |= MAP_FIXED;
563 else if (no_base && interp_elf_ex->e_type == ET_DYN)
564 load_addr = -vaddr;
566 map_addr = elf_map(interpreter, load_addr + vaddr,
567 eppnt, elf_prot, elf_type, total_size);
568 total_size = 0;
569 if (!*interp_map_addr)
570 *interp_map_addr = map_addr;
571 error = map_addr;
572 if (BAD_ADDR(map_addr))
573 goto out;
575 if (!load_addr_set &&
576 interp_elf_ex->e_type == ET_DYN) {
577 load_addr = map_addr - ELF_PAGESTART(vaddr);
578 load_addr_set = 1;
582 * Check to see if the section's size will overflow the
583 * allowed task size. Note that p_filesz must always be
584 * <= p_memsz so it's only necessary to check p_memsz.
586 k = load_addr + eppnt->p_vaddr;
587 if (BAD_ADDR(k) ||
588 eppnt->p_filesz > eppnt->p_memsz ||
589 eppnt->p_memsz > TASK_SIZE ||
590 TASK_SIZE - eppnt->p_memsz < k) {
591 error = -ENOMEM;
592 goto out;
596 * Find the end of the file mapping for this phdr, and
597 * keep track of the largest address we see for this.
599 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
600 if (k > elf_bss)
601 elf_bss = k;
604 * Do the same thing for the memory mapping - between
605 * elf_bss and last_bss is the bss section.
607 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
608 if (k > last_bss)
609 last_bss = k;
614 * Now fill out the bss section: first pad the last page from
615 * the file up to the page boundary, and zero it from elf_bss
616 * up to the end of the page.
618 if (padzero(elf_bss)) {
619 error = -EFAULT;
620 goto out;
623 * Next, align both the file and mem bss up to the page size,
624 * since this is where elf_bss was just zeroed up to, and where
625 * last_bss will end after the vm_brk() below.
627 elf_bss = ELF_PAGEALIGN(elf_bss);
628 last_bss = ELF_PAGEALIGN(last_bss);
629 /* Finally, if there is still more bss to allocate, do it. */
630 if (last_bss > elf_bss) {
631 error = vm_brk(elf_bss, last_bss - elf_bss);
632 if (BAD_ADDR(error))
633 goto out;
636 error = load_addr;
637 out:
638 return error;
642 * These are the functions used to load ELF style executables and shared
643 * libraries. There is no binary dependent code anywhere else.
646 #ifndef STACK_RND_MASK
647 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
648 #endif
650 static unsigned long randomize_stack_top(unsigned long stack_top)
652 unsigned long random_variable = 0;
654 if ((current->flags & PF_RANDOMIZE) &&
655 !(current->personality & ADDR_NO_RANDOMIZE)) {
656 random_variable = (unsigned long) get_random_int();
657 random_variable &= STACK_RND_MASK;
658 random_variable <<= PAGE_SHIFT;
660 #ifdef CONFIG_STACK_GROWSUP
661 return PAGE_ALIGN(stack_top) + random_variable;
662 #else
663 return PAGE_ALIGN(stack_top) - random_variable;
664 #endif
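/*
 * With the default STACK_RND_MASK of 0x7ff and 4 KiB pages (both assumed
 * here), randomize_stack_top() shifts the stack top by a random multiple
 * of PAGE_SIZE in the range [0, 0x7ff000], i.e. by up to 8 MiB minus one
 * page: subtracted on normal stacks, added when the stack grows upwards.
 */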
667 static int load_elf_binary(struct linux_binprm *bprm)
669 struct file *interpreter = NULL; /* to shut gcc up */
670 unsigned long load_addr = 0, load_bias = 0;
671 int load_addr_set = 0;
672 char * elf_interpreter = NULL;
673 unsigned long error;
674 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
675 unsigned long elf_bss, elf_brk;
676 int retval, i;
677 unsigned long elf_entry;
678 unsigned long interp_load_addr = 0;
679 unsigned long start_code, end_code, start_data, end_data;
680 unsigned long reloc_func_desc __maybe_unused = 0;
681 int executable_stack = EXSTACK_DEFAULT;
682 struct pt_regs *regs = current_pt_regs();
683 struct {
684 struct elfhdr elf_ex;
685 struct elfhdr interp_elf_ex;
686 } *loc;
687 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
689 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
690 if (!loc) {
691 retval = -ENOMEM;
692 goto out_ret;
695 /* Get the exec-header */
696 loc->elf_ex = *((struct elfhdr *)bprm->buf);
698 retval = -ENOEXEC;
699 /* First of all, some simple consistency checks */
700 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
701 goto out;
703 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
704 goto out;
705 if (!elf_check_arch(&loc->elf_ex))
706 goto out;
707 if (!bprm->file->f_op->mmap)
708 goto out;
710 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
711 if (!elf_phdata)
712 goto out;
714 elf_ppnt = elf_phdata;
715 elf_bss = 0;
716 elf_brk = 0;
718 start_code = ~0UL;
719 end_code = 0;
720 start_data = 0;
721 end_data = 0;
723 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
724 if (elf_ppnt->p_type == PT_INTERP) {
725 /* This is the program interpreter used for
726 * shared libraries - for now assume that this
727 * is an a.out format binary
729 retval = -ENOEXEC;
730 if (elf_ppnt->p_filesz > PATH_MAX ||
731 elf_ppnt->p_filesz < 2)
732 goto out_free_ph;
734 retval = -ENOMEM;
735 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
736 GFP_KERNEL);
737 if (!elf_interpreter)
738 goto out_free_ph;
740 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
741 elf_interpreter,
742 elf_ppnt->p_filesz);
743 if (retval != elf_ppnt->p_filesz) {
744 if (retval >= 0)
745 retval = -EIO;
746 goto out_free_interp;
748 /* make sure path is NULL terminated */
749 retval = -ENOEXEC;
750 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
751 goto out_free_interp;
753 interpreter = open_exec(elf_interpreter);
754 retval = PTR_ERR(interpreter);
755 if (IS_ERR(interpreter))
756 goto out_free_interp;
759 * If the binary is not readable then enforce
760 * mm->dumpable = 0 regardless of the interpreter's
761 * permissions.
763 would_dump(bprm, interpreter);
765 /* Get the exec headers */
766 retval = kernel_read(interpreter, 0,
767 (void *)&loc->interp_elf_ex,
768 sizeof(loc->interp_elf_ex));
769 if (retval != sizeof(loc->interp_elf_ex)) {
770 if (retval >= 0)
771 retval = -EIO;
772 goto out_free_dentry;
775 break;
777 elf_ppnt++;
780 elf_ppnt = elf_phdata;
781 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
782 switch (elf_ppnt->p_type) {
783 case PT_GNU_STACK:
784 if (elf_ppnt->p_flags & PF_X)
785 executable_stack = EXSTACK_ENABLE_X;
786 else
787 executable_stack = EXSTACK_DISABLE_X;
788 break;
790 case PT_LOPROC ... PT_HIPROC:
791 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
792 bprm->file, false,
793 &arch_state);
794 if (retval)
795 goto out_free_dentry;
796 break;
799 /* Some simple consistency checks for the interpreter */
800 if (elf_interpreter) {
801 retval = -ELIBBAD;
802 /* Not an ELF interpreter */
803 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
804 goto out_free_dentry;
805 /* Verify the interpreter has a valid arch */
806 if (!elf_check_arch(&loc->interp_elf_ex))
807 goto out_free_dentry;
809 /* Load the interpreter program headers */
810 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
811 interpreter);
812 if (!interp_elf_phdata)
813 goto out_free_dentry;
815 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
816 elf_ppnt = interp_elf_phdata;
817 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
818 switch (elf_ppnt->p_type) {
819 case PT_LOPROC ... PT_HIPROC:
820 retval = arch_elf_pt_proc(&loc->interp_elf_ex,
821 elf_ppnt, interpreter,
822 true, &arch_state);
823 if (retval)
824 goto out_free_dentry;
825 break;
830 * Allow arch code to reject the ELF at this point, whilst it's
831 * still possible to return an error to the code that invoked
832 * the exec syscall.
834 retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
835 if (retval)
836 goto out_free_dentry;
838 /* Flush all traces of the currently running executable */
839 retval = flush_old_exec(bprm);
840 if (retval)
841 goto out_free_dentry;
843 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
844 may depend on the personality. */
845 SET_PERSONALITY2(loc->elf_ex, &arch_state);
846 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
847 current->personality |= READ_IMPLIES_EXEC;
849 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
850 current->flags |= PF_RANDOMIZE;
852 setup_new_exec(bprm);
853 install_exec_creds(bprm);
855 /* Do this so that we can load the interpreter, if need be. We will
856 change some of these later */
857 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
858 executable_stack);
859 if (retval < 0)
860 goto out_free_dentry;
862 current->mm->start_stack = bprm->p;
864 /* Now we do a little grungy work by mmapping the ELF image into
865 the correct location in memory. */
866 for(i = 0, elf_ppnt = elf_phdata;
867 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
868 int elf_prot = 0, elf_flags;
869 unsigned long k, vaddr;
870 unsigned long total_size = 0;
872 if (elf_ppnt->p_type != PT_LOAD)
873 continue;
875 if (unlikely (elf_brk > elf_bss)) {
876 unsigned long nbyte;
878 /* There was a PT_LOAD segment with p_memsz > p_filesz
879 before this one. Map anonymous pages, if needed,
880 and clear the area. */
881 retval = set_brk(elf_bss + load_bias,
882 elf_brk + load_bias);
883 if (retval)
884 goto out_free_dentry;
885 nbyte = ELF_PAGEOFFSET(elf_bss);
886 if (nbyte) {
887 nbyte = ELF_MIN_ALIGN - nbyte;
888 if (nbyte > elf_brk - elf_bss)
889 nbyte = elf_brk - elf_bss;
890 if (clear_user((void __user *)elf_bss +
891 load_bias, nbyte)) {
893 * This bss-zeroing can fail if the ELF
894 * file specifies odd protections. So
895 * we don't check the return value
901 if (elf_ppnt->p_flags & PF_R)
902 elf_prot |= PROT_READ;
903 if (elf_ppnt->p_flags & PF_W)
904 elf_prot |= PROT_WRITE;
905 if (elf_ppnt->p_flags & PF_X)
906 elf_prot |= PROT_EXEC;
908 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
910 vaddr = elf_ppnt->p_vaddr;
912 * If we are loading ET_EXEC or we have already performed
913 * the ET_DYN load_addr calculations, proceed normally.
915 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
916 elf_flags |= MAP_FIXED;
917 } else if (loc->elf_ex.e_type == ET_DYN) {
919 * This logic is run once for the first LOAD Program
920 * Header for ET_DYN binaries to calculate the
921 * randomization (load_bias) for all the LOAD
922 * Program Headers, and to calculate the entire
923 * size of the ELF mapping (total_size). (Note that
924 * load_addr_set is set to true later once the
925 * initial mapping is performed.)
927 * There are effectively two types of ET_DYN
928 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
929 * and loaders (ET_DYN without INTERP, since they
930 * _are_ the ELF interpreter). The loaders must
931 * be loaded away from programs since the program
932 * may otherwise collide with the loader (especially
933 * for ET_EXEC which does not have a randomized
934 * position). For example to handle invocations of
935 * "./ld.so someprog" to test out a new version of
936 * the loader, the subsequent program that the
937 * loader loads must avoid the loader itself, so
938 * they cannot share the same load range. Sufficient
939 * room for the brk must be allocated with the
940 * loader as well, since brk must be available with
941 * the loader.
943 * Therefore, programs are loaded offset from
944 * ELF_ET_DYN_BASE and loaders are loaded into the
945 * independently randomized mmap region (0 load_bias
946 * without MAP_FIXED).
948 if (elf_interpreter) {
949 load_bias = ELF_ET_DYN_BASE;
950 if (current->flags & PF_RANDOMIZE)
951 load_bias += arch_mmap_rnd();
952 elf_flags |= MAP_FIXED;
953 } else
954 load_bias = 0;
957 * Since load_bias is used for all subsequent loading
958 * calculations, we must lower it by the first vaddr
959 * so that the remaining calculations based on the
960 * ELF vaddrs will be correctly offset. The result
961 * is then page aligned.
963 load_bias = ELF_PAGESTART(load_bias - vaddr);
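/*
 * Numeric sketch of the load_bias computation above, assuming an
 * ELF_ET_DYN_BASE of 0x555555000000, an arch_mmap_rnd() result of
 * 0x12345000 and a first p_vaddr of 0x1000: load_bias becomes
 * ELF_PAGESTART(0x555567345000 - 0x1000) = 0x555567344000, so that
 * vaddr 0x1000 lands at 0x555567345000 once elf_map() succeeds.
 */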
965 total_size = total_mapping_size(elf_phdata,
966 loc->elf_ex.e_phnum);
967 if (!total_size) {
968 retval = -EINVAL;
969 goto out_free_dentry;
973 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
974 elf_prot, elf_flags, total_size);
975 if (BAD_ADDR(error)) {
976 retval = IS_ERR((void *)error) ?
977 PTR_ERR((void*)error) : -EINVAL;
978 goto out_free_dentry;
981 if (!load_addr_set) {
982 load_addr_set = 1;
983 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
984 if (loc->elf_ex.e_type == ET_DYN) {
985 load_bias += error -
986 ELF_PAGESTART(load_bias + vaddr);
987 load_addr += load_bias;
988 reloc_func_desc = load_bias;
991 k = elf_ppnt->p_vaddr;
992 if (k < start_code)
993 start_code = k;
994 if (start_data < k)
995 start_data = k;
998 * Check to see if the section's size will overflow the
999 * allowed task size. Note that p_filesz must always be
1000 * <= p_memsz so it is only necessary to check p_memsz.
1002 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1003 elf_ppnt->p_memsz > TASK_SIZE ||
1004 TASK_SIZE - elf_ppnt->p_memsz < k) {
1005 /* set_brk can never work. Avoid overflows. */
1006 retval = -EINVAL;
1007 goto out_free_dentry;
1010 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1012 if (k > elf_bss)
1013 elf_bss = k;
1014 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1015 end_code = k;
1016 if (end_data < k)
1017 end_data = k;
1018 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1019 if (k > elf_brk)
1020 elf_brk = k;
1023 loc->elf_ex.e_entry += load_bias;
1024 elf_bss += load_bias;
1025 elf_brk += load_bias;
1026 start_code += load_bias;
1027 end_code += load_bias;
1028 start_data += load_bias;
1029 end_data += load_bias;
1031 /* Calling set_brk effectively mmaps the pages that we need
1032 * for the bss and break sections. We must do this before
1033 * mapping in the interpreter, to make sure it doesn't wind
1034 * up getting placed where the bss needs to go.
1036 retval = set_brk(elf_bss, elf_brk);
1037 if (retval)
1038 goto out_free_dentry;
1039 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1040 retval = -EFAULT; /* Nobody gets to see this, but.. */
1041 goto out_free_dentry;
1044 if (elf_interpreter) {
1045 unsigned long interp_map_addr = 0;
1047 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1048 interpreter,
1049 &interp_map_addr,
1050 load_bias, interp_elf_phdata);
1051 if (!IS_ERR((void *)elf_entry)) {
1053 * load_elf_interp() returns relocation
1054 * adjustment
1056 interp_load_addr = elf_entry;
1057 elf_entry += loc->interp_elf_ex.e_entry;
1059 if (BAD_ADDR(elf_entry)) {
1060 retval = IS_ERR((void *)elf_entry) ?
1061 (int)elf_entry : -EINVAL;
1062 goto out_free_dentry;
1064 reloc_func_desc = interp_load_addr;
1066 allow_write_access(interpreter);
1067 fput(interpreter);
1068 kfree(elf_interpreter);
1069 } else {
1070 elf_entry = loc->elf_ex.e_entry;
1071 if (BAD_ADDR(elf_entry)) {
1072 retval = -EINVAL;
1073 goto out_free_dentry;
1077 kfree(interp_elf_phdata);
1078 kfree(elf_phdata);
1080 set_binfmt(&elf_format);
1082 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1083 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
1084 if (retval < 0)
1085 goto out;
1086 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1088 retval = create_elf_tables(bprm, &loc->elf_ex,
1089 load_addr, interp_load_addr);
1090 if (retval < 0)
1091 goto out;
1092 /* N.B. passed_fileno might not be initialized? */
1093 current->mm->end_code = end_code;
1094 current->mm->start_code = start_code;
1095 current->mm->start_data = start_data;
1096 current->mm->end_data = end_data;
1097 current->mm->start_stack = bprm->p;
1099 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1101 * For architectures with ELF randomization, when executing
1102 * a loader directly (i.e. no interpreter listed in ELF
1103 * headers), move the brk area out of the mmap region
1104 * (since it grows up, and may collide early with the stack
1105 * growing down), and into the unused ELF_ET_DYN_BASE region.
1107 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
1108 loc->elf_ex.e_type == ET_DYN && !interpreter)
1109 current->mm->brk = current->mm->start_brk =
1110 ELF_ET_DYN_BASE;
1112 current->mm->brk = current->mm->start_brk =
1113 arch_randomize_brk(current->mm);
1114 #ifdef compat_brk_randomized
1115 current->brk_randomized = 1;
1116 #endif
1119 if (current->personality & MMAP_PAGE_ZERO) {
1120 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1121 and some applications "depend" upon this behavior.
1122 Since we do not have the power to recompile these, we
1123 emulate the SVr4 behavior. Sigh. */
1124 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1125 MAP_FIXED | MAP_PRIVATE, 0);
1128 #ifdef ELF_PLAT_INIT
1130 * The ABI may specify that certain registers be set up in special
1131 * ways (on i386 %edx is the address of a DT_FINI function, for
1132 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1133 * that the e_entry field is the address of the function descriptor
1134 * for the startup routine, rather than the address of the startup
1135 * routine itself. This macro performs whatever initialization to
1136 * the regs structure is required as well as any relocations to the
1137 * function descriptor entries when executing dynamically linked apps.
1139 ELF_PLAT_INIT(regs, reloc_func_desc);
1140 #endif
1142 start_thread(regs, elf_entry, bprm->p);
1143 retval = 0;
1144 out:
1145 kfree(loc);
1146 out_ret:
1147 return retval;
1149 /* error cleanup */
1150 out_free_dentry:
1151 kfree(interp_elf_phdata);
1152 allow_write_access(interpreter);
1153 if (interpreter)
1154 fput(interpreter);
1155 out_free_interp:
1156 kfree(elf_interpreter);
1157 out_free_ph:
1158 kfree(elf_phdata);
1159 goto out;
1162 #ifdef CONFIG_USELIB
1163 /* This is really simpleminded and specialized - we are loading an
1164 a.out library that is given an ELF header. */
1165 static int load_elf_library(struct file *file)
1167 struct elf_phdr *elf_phdata;
1168 struct elf_phdr *eppnt;
1169 unsigned long elf_bss, bss, len;
1170 int retval, error, i, j;
1171 struct elfhdr elf_ex;
1173 error = -ENOEXEC;
1174 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1175 if (retval != sizeof(elf_ex))
1176 goto out;
1178 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1179 goto out;
1181 /* First of all, some simple consistency checks */
1182 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1183 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1184 goto out;
1186 /* Now read in all of the header information */
1188 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1189 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1191 error = -ENOMEM;
1192 elf_phdata = kmalloc(j, GFP_KERNEL);
1193 if (!elf_phdata)
1194 goto out;
1196 eppnt = elf_phdata;
1197 error = -ENOEXEC;
1198 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1199 if (retval != j)
1200 goto out_free_ph;
1202 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1203 if ((eppnt + i)->p_type == PT_LOAD)
1204 j++;
1205 if (j != 1)
1206 goto out_free_ph;
1208 while (eppnt->p_type != PT_LOAD)
1209 eppnt++;
1211 /* Now use mmap to map the library into memory. */
1212 error = vm_mmap(file,
1213 ELF_PAGESTART(eppnt->p_vaddr),
1214 (eppnt->p_filesz +
1215 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1216 PROT_READ | PROT_WRITE | PROT_EXEC,
1217 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1218 (eppnt->p_offset -
1219 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1220 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1221 goto out_free_ph;
1223 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1224 if (padzero(elf_bss)) {
1225 error = -EFAULT;
1226 goto out_free_ph;
1229 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1230 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
1231 if (bss > len) {
1232 error = vm_brk(len, bss - len);
1233 if (BAD_ADDR(error))
1234 goto out_free_ph;
1236 error = 0;
1238 out_free_ph:
1239 kfree(elf_phdata);
1240 out:
1241 return error;
1243 #endif /* #ifdef CONFIG_USELIB */
1245 #ifdef CONFIG_ELF_CORE
1247 * ELF core dumper
1249 * Modelled on fs/exec.c:aout_core_dump()
1250 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1254 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1255 * that are useful for post-mortem analysis are included in every core dump.
1256 * In that way we ensure that the core dump is fully interpretable later
1257 * without matching up the same kernel and hardware config to see what PC values
1258 * meant. These special mappings include - vDSO, vsyscall, and other
1259 * architecture specific mappings
1261 static bool always_dump_vma(struct vm_area_struct *vma)
1263 /* Any vsyscall mappings? */
1264 if (vma == get_gate_vma(vma->vm_mm))
1265 return true;
1268 * Assume that all vmas with a .name op should always be dumped.
1269 * If this changes, a new vm_ops field can easily be added.
1271 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1272 return true;
1275 * arch_vma_name() returns non-NULL for special architecture mappings,
1276 * such as vDSO sections.
1278 if (arch_vma_name(vma))
1279 return true;
1281 return false;
1285 * Decide what to dump of a segment, part, all or none.
1287 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1288 unsigned long mm_flags)
1290 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1292 /* always dump the vdso and vsyscall sections */
1293 if (always_dump_vma(vma))
1294 goto whole;
1296 if (vma->vm_flags & VM_DONTDUMP)
1297 return 0;
1299 /* support for DAX */
1300 if (vma_is_dax(vma)) {
1301 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1302 goto whole;
1303 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1304 goto whole;
1305 return 0;
1308 /* Hugetlb memory check */
1309 if (vma->vm_flags & VM_HUGETLB) {
1310 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1311 goto whole;
1312 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1313 goto whole;
1314 return 0;
1317 /* Do not dump I/O mapped devices or special mappings */
1318 if (vma->vm_flags & VM_IO)
1319 return 0;
1321 /* By default, dump shared memory if mapped from an anonymous file. */
1322 if (vma->vm_flags & VM_SHARED) {
1323 if (file_inode(vma->vm_file)->i_nlink == 0 ?
1324 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1325 goto whole;
1326 return 0;
1329 /* Dump segments that have been written to. */
1330 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1331 goto whole;
1332 if (vma->vm_file == NULL)
1333 return 0;
1335 if (FILTER(MAPPED_PRIVATE))
1336 goto whole;
1339 * If this looks like the beginning of a DSO or executable mapping,
1340 * check for an ELF header. If we find one, dump the first page to
1341 * aid in determining what was mapped here.
1343 if (FILTER(ELF_HEADERS) &&
1344 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1345 u32 __user *header = (u32 __user *) vma->vm_start;
1346 u32 word;
1347 mm_segment_t fs = get_fs();
1349 * Doing it this way gets the constant folded by GCC.
1351 union {
1352 u32 cmp;
1353 char elfmag[SELFMAG];
1354 } magic;
1355 BUILD_BUG_ON(SELFMAG != sizeof word);
1356 magic.elfmag[EI_MAG0] = ELFMAG0;
1357 magic.elfmag[EI_MAG1] = ELFMAG1;
1358 magic.elfmag[EI_MAG2] = ELFMAG2;
1359 magic.elfmag[EI_MAG3] = ELFMAG3;
1361 * Switch to the user "segment" for get_user(),
1362 * then put back what elf_core_dump() had in place.
1364 set_fs(USER_DS);
1365 if (unlikely(get_user(word, header)))
1366 word = 0;
1367 set_fs(fs);
1368 if (word == magic.cmp)
1369 return PAGE_SIZE;
1372 #undef FILTER
1374 return 0;
1376 whole:
1377 return vma->vm_end - vma->vm_start;
1380 /* An ELF note in memory */
1381 struct memelfnote
1383 const char *name;
1384 int type;
1385 unsigned int datasz;
1386 void *data;
1389 static int notesize(struct memelfnote *en)
1391 int sz;
1393 sz = sizeof(struct elf_note);
1394 sz += roundup(strlen(en->name) + 1, 4);
1395 sz += roundup(en->datasz, 4);
1397 return sz;
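/*
 * Example: a note named "CORE" with a 336-byte descriptor costs
 * sizeof(struct elf_note) (12 bytes, three 32-bit words) plus
 * roundup(5, 4) = 8 bytes for the name and its NUL plus
 * roundup(336, 4) = 336 bytes of payload, i.e. 356 bytes in the core file.
 */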
1400 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1402 struct elf_note en;
1403 en.n_namesz = strlen(men->name) + 1;
1404 en.n_descsz = men->datasz;
1405 en.n_type = men->type;
1407 return dump_emit(cprm, &en, sizeof(en)) &&
1408 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1409 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1412 static void fill_elf_header(struct elfhdr *elf, int segs,
1413 u16 machine, u32 flags)
1415 memset(elf, 0, sizeof(*elf));
1417 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1418 elf->e_ident[EI_CLASS] = ELF_CLASS;
1419 elf->e_ident[EI_DATA] = ELF_DATA;
1420 elf->e_ident[EI_VERSION] = EV_CURRENT;
1421 elf->e_ident[EI_OSABI] = ELF_OSABI;
1423 elf->e_type = ET_CORE;
1424 elf->e_machine = machine;
1425 elf->e_version = EV_CURRENT;
1426 elf->e_phoff = sizeof(struct elfhdr);
1427 elf->e_flags = flags;
1428 elf->e_ehsize = sizeof(struct elfhdr);
1429 elf->e_phentsize = sizeof(struct elf_phdr);
1430 elf->e_phnum = segs;
1432 return;
1435 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1437 phdr->p_type = PT_NOTE;
1438 phdr->p_offset = offset;
1439 phdr->p_vaddr = 0;
1440 phdr->p_paddr = 0;
1441 phdr->p_filesz = sz;
1442 phdr->p_memsz = 0;
1443 phdr->p_flags = 0;
1444 phdr->p_align = 0;
1445 return;
1448 static void fill_note(struct memelfnote *note, const char *name, int type,
1449 unsigned int sz, void *data)
1451 note->name = name;
1452 note->type = type;
1453 note->datasz = sz;
1454 note->data = data;
1455 return;
1459 * fill up all the fields in prstatus from the given task struct, except
1460 * registers which need to be filled up separately.
1462 static void fill_prstatus(struct elf_prstatus *prstatus,
1463 struct task_struct *p, long signr)
1465 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1466 prstatus->pr_sigpend = p->pending.signal.sig[0];
1467 prstatus->pr_sighold = p->blocked.sig[0];
1468 rcu_read_lock();
1469 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1470 rcu_read_unlock();
1471 prstatus->pr_pid = task_pid_vnr(p);
1472 prstatus->pr_pgrp = task_pgrp_vnr(p);
1473 prstatus->pr_sid = task_session_vnr(p);
1474 if (thread_group_leader(p)) {
1475 struct task_cputime cputime;
1478 * This is the record for the group leader. It shows the
1479 * group-wide total, not its individual thread total.
1481 thread_group_cputime(p, &cputime);
1482 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1483 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1484 } else {
1485 cputime_t utime, stime;
1487 task_cputime(p, &utime, &stime);
1488 cputime_to_timeval(utime, &prstatus->pr_utime);
1489 cputime_to_timeval(stime, &prstatus->pr_stime);
1491 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1492 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1495 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1496 struct mm_struct *mm)
1498 const struct cred *cred;
1499 unsigned int i, len;
1501 /* first copy the parameters from user space */
1502 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1504 len = mm->arg_end - mm->arg_start;
1505 if (len >= ELF_PRARGSZ)
1506 len = ELF_PRARGSZ-1;
1507 if (copy_from_user(&psinfo->pr_psargs,
1508 (const char __user *)mm->arg_start, len))
1509 return -EFAULT;
1510 for(i = 0; i < len; i++)
1511 if (psinfo->pr_psargs[i] == 0)
1512 psinfo->pr_psargs[i] = ' ';
1513 psinfo->pr_psargs[len] = 0;
1515 rcu_read_lock();
1516 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1517 rcu_read_unlock();
1518 psinfo->pr_pid = task_pid_vnr(p);
1519 psinfo->pr_pgrp = task_pgrp_vnr(p);
1520 psinfo->pr_sid = task_session_vnr(p);
1522 i = p->state ? ffz(~p->state) + 1 : 0;
1523 psinfo->pr_state = i;
1524 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1525 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1526 psinfo->pr_nice = task_nice(p);
1527 psinfo->pr_flag = p->flags;
1528 rcu_read_lock();
1529 cred = __task_cred(p);
1530 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1531 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1532 rcu_read_unlock();
1533 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1535 return 0;
1538 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1540 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1541 int i = 0;
1542 do
1543 i += 2;
1544 while (auxv[i - 2] != AT_NULL);
1545 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1548 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1549 const siginfo_t *siginfo)
1551 mm_segment_t old_fs = get_fs();
1552 set_fs(KERNEL_DS);
1553 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1554 set_fs(old_fs);
1555 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1558 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
1560 * Format of NT_FILE note:
1562 * long count -- how many files are mapped
1563 * long page_size -- units for file_ofs
1564 * array of [COUNT] elements of
1565 * long start
1566 * long end
1567 * long file_ofs
1568 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1570 static int fill_files_note(struct memelfnote *note)
1572 struct vm_area_struct *vma;
1573 unsigned count, size, names_ofs, remaining, n;
1574 user_long_t *data;
1575 user_long_t *start_end_ofs;
1576 char *name_base, *name_curpos;
1578 /* *Estimated* file count and total data size needed */
1579 count = current->mm->map_count;
1580 size = count * 64;
1582 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1583 alloc:
1584 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1585 return -EINVAL;
1586 size = round_up(size, PAGE_SIZE);
1587 data = vmalloc(size);
1588 if (!data)
1589 return -ENOMEM;
1591 start_end_ofs = data + 2;
1592 name_base = name_curpos = ((char *)data) + names_ofs;
1593 remaining = size - names_ofs;
1594 count = 0;
1595 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1596 struct file *file;
1597 const char *filename;
1599 file = vma->vm_file;
1600 if (!file)
1601 continue;
1602 filename = file_path(file, name_curpos, remaining);
1603 if (IS_ERR(filename)) {
1604 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1605 vfree(data);
1606 size = size * 5 / 4;
1607 goto alloc;
1609 continue;
1612 /* file_path() fills at the end, move name down */
1613 /* n = strlen(filename) + 1: */
1614 n = (name_curpos + remaining) - filename;
1615 remaining = filename - name_curpos;
1616 memmove(name_curpos, filename, n);
1617 name_curpos += n;
1619 *start_end_ofs++ = vma->vm_start;
1620 *start_end_ofs++ = vma->vm_end;
1621 *start_end_ofs++ = vma->vm_pgoff;
1622 count++;
1625 /* Now we know exact count of files, can store it */
1626 data[0] = count;
1627 data[1] = PAGE_SIZE;
1629 * Count usually is less than current->mm->map_count,
1630 * we need to move filenames down.
1632 n = current->mm->map_count - count;
1633 if (n != 0) {
1634 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1635 memmove(name_base - shift_bytes, name_base,
1636 name_curpos - name_base);
1637 name_curpos -= shift_bytes;
1640 size = name_curpos - (char *)data;
1641 fill_note(note, "CORE", NT_FILE, size, data);
1642 return 0;
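/*
 * Example NT_FILE payload laid out by fill_files_note() for two file-backed
 * mappings (addresses illustrative, a 64-bit user_long_t assumed):
 *
 *   data[0] = 2            number of mappings
 *   data[1] = 4096         page size, the unit of the stored file offsets
 *   data[2..4]             start, end, vm_pgoff of the first vma
 *   data[5..7]             start, end, vm_pgoff of the second vma
 *   ...followed by the two filenames as NUL-terminated strings.
 */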
1645 #ifdef CORE_DUMP_USE_REGSET
1646 #include <linux/regset.h>
1648 struct elf_thread_core_info {
1649 struct elf_thread_core_info *next;
1650 struct task_struct *task;
1651 struct elf_prstatus prstatus;
1652 struct memelfnote notes[0];
1655 struct elf_note_info {
1656 struct elf_thread_core_info *thread;
1657 struct memelfnote psinfo;
1658 struct memelfnote signote;
1659 struct memelfnote auxv;
1660 struct memelfnote files;
1661 user_siginfo_t csigdata;
1662 size_t size;
1663 int thread_notes;
1667 * When a regset has a writeback hook, we call it on each thread before
1668 * dumping user memory. On register window machines, this makes sure the
1669 * user memory backing the register data is up to date before we read it.
1671 static void do_thread_regset_writeback(struct task_struct *task,
1672 const struct user_regset *regset)
1674 if (regset->writeback)
1675 regset->writeback(task, regset, 1);
1678 #ifndef PR_REG_SIZE
1679 #define PR_REG_SIZE(S) sizeof(S)
1680 #endif
1682 #ifndef PRSTATUS_SIZE
1683 #define PRSTATUS_SIZE(S) sizeof(S)
1684 #endif
1686 #ifndef PR_REG_PTR
1687 #define PR_REG_PTR(S) (&((S)->pr_reg))
1688 #endif
1690 #ifndef SET_PR_FPVALID
1691 #define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
1692 #endif
1694 static int fill_thread_core_info(struct elf_thread_core_info *t,
1695 const struct user_regset_view *view,
1696 long signr, size_t *total)
1698 unsigned int i;
1701 * NT_PRSTATUS is the one special case, because the regset data
1702 * goes into the pr_reg field inside the note contents, rather
1703 * than being the whole note contents. We fill the rest in here.
1704 * We assume that regset 0 is NT_PRSTATUS.
1706 fill_prstatus(&t->prstatus, t->task, signr);
1707 (void) view->regsets[0].get(t->task, &view->regsets[0],
1708 0, PR_REG_SIZE(t->prstatus.pr_reg),
1709 PR_REG_PTR(&t->prstatus), NULL);
1711 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1712 PRSTATUS_SIZE(t->prstatus), &t->prstatus);
1713 *total += notesize(&t->notes[0]);
1715 do_thread_regset_writeback(t->task, &view->regsets[0]);
1718 * Each other regset might generate a note too. For each regset
1719 * that has no core_note_type or is inactive, we leave t->notes[i]
1720 * all zero and we'll know to skip writing it later.
1722 for (i = 1; i < view->n; ++i) {
1723 const struct user_regset *regset = &view->regsets[i];
1724 do_thread_regset_writeback(t->task, regset);
1725 if (regset->core_note_type && regset->get &&
1726 (!regset->active || regset->active(t->task, regset) > 0)) {
1727 int ret;
1728 size_t size = regset->n * regset->size;
1729 void *data = kzalloc(size, GFP_KERNEL);
1730 if (unlikely(!data))
1731 return 0;
1732 ret = regset->get(t->task, regset,
1733 0, size, data, NULL);
1734 if (unlikely(ret))
1735 kfree(data);
1736 else {
1737 if (regset->core_note_type != NT_PRFPREG)
1738 fill_note(&t->notes[i], "LINUX",
1739 regset->core_note_type,
1740 size, data);
1741 else {
1742 SET_PR_FPVALID(&t->prstatus, 1);
1743 fill_note(&t->notes[i], "CORE",
1744 NT_PRFPREG, size, data);
1746 *total += notesize(&t->notes[i]);
1751 return 1;
1754 static int fill_note_info(struct elfhdr *elf, int phdrs,
1755 struct elf_note_info *info,
1756 const siginfo_t *siginfo, struct pt_regs *regs)
1758 struct task_struct *dump_task = current;
1759 const struct user_regset_view *view = task_user_regset_view(dump_task);
1760 struct elf_thread_core_info *t;
1761 struct elf_prpsinfo *psinfo;
1762 struct core_thread *ct;
1763 unsigned int i;
1765 info->size = 0;
1766 info->thread = NULL;
1768 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1769 if (psinfo == NULL) {
1770 info->psinfo.data = NULL; /* So we don't free this wrongly */
1771 return 0;
1774 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1777 * Figure out how many notes we're going to need for each thread.
1779 info->thread_notes = 0;
1780 for (i = 0; i < view->n; ++i)
1781 if (view->regsets[i].core_note_type != 0)
1782 ++info->thread_notes;
1785 * Sanity check. We rely on regset 0 being in NT_PRSTATUS,
1786 * since it is our one special case.
1788 if (unlikely(info->thread_notes == 0) ||
1789 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1790 WARN_ON(1);
1791 return 0;
1795 * Initialize the ELF file header.
1797 fill_elf_header(elf, phdrs,
1798 view->e_machine, view->e_flags);
1801 * Allocate a structure for each thread.
1803 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1804 t = kzalloc(offsetof(struct elf_thread_core_info,
1805 notes[info->thread_notes]),
1806 GFP_KERNEL);
1807 if (unlikely(!t))
1808 return 0;
1810 t->task = ct->task;
1811 if (ct->task == dump_task || !info->thread) {
1812 t->next = info->thread;
1813 info->thread = t;
1814 } else {
1816 * Make sure to keep the original task at
1817 * the head of the list.
1819 t->next = info->thread->next;
1820 info->thread->next = t;
1825 * Now fill in each thread's information.
1827 for (t = info->thread; t != NULL; t = t->next)
1828 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
1829 return 0;
1832 * Fill in the two process-wide notes.
1834 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1835 info->size += notesize(&info->psinfo);
1837 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1838 info->size += notesize(&info->signote);
1840 fill_auxv_note(&info->auxv, current->mm);
1841 info->size += notesize(&info->auxv);
1843 if (fill_files_note(&info->files) == 0)
1844 info->size += notesize(&info->files);
1846 return 1;
1849 static size_t get_note_info_size(struct elf_note_info *info)
1851 return info->size;
1855 * Write all the notes for each thread. When writing the first thread, the
1856 * process-wide notes are interleaved after the first thread-specific note.
1858 static int write_note_info(struct elf_note_info *info,
1859 struct coredump_params *cprm)
1861 bool first = true;
1862 struct elf_thread_core_info *t = info->thread;
1864 do {
1865 int i;
1867 if (!writenote(&t->notes[0], cprm))
1868 return 0;
1870 if (first && !writenote(&info->psinfo, cprm))
1871 return 0;
1872 if (first && !writenote(&info->signote, cprm))
1873 return 0;
1874 if (first && !writenote(&info->auxv, cprm))
1875 return 0;
1876 if (first && info->files.data &&
1877 !writenote(&info->files, cprm))
1878 return 0;
1880 for (i = 1; i < info->thread_notes; ++i)
1881 if (t->notes[i].data &&
1882 !writenote(&t->notes[i], cprm))
1883 return 0;
1885 first = false;
1886 t = t->next;
1887 } while (t);
1889 return 1;
1892 static void free_note_info(struct elf_note_info *info)
1894 struct elf_thread_core_info *threads = info->thread;
1895 while (threads) {
1896 unsigned int i;
1897 struct elf_thread_core_info *t = threads;
1898 threads = t->next;
1899 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1900 for (i = 1; i < info->thread_notes; ++i)
1901 kfree(t->notes[i].data);
1902 kfree(t);
1904 kfree(info->psinfo.data);
1905 vfree(info->files.data);
1908 #else
1910 /* Here is the structure in which status of each thread is captured. */
1911 struct elf_thread_status
1913 struct list_head list;
1914 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1915 elf_fpregset_t fpu; /* NT_PRFPREG */
1916 struct task_struct *thread;
1917 #ifdef ELF_CORE_COPY_XFPREGS
1918 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1919 #endif
1920 struct memelfnote notes[3];
1921 int num_notes;
1925 * In order to add the specific thread information for the elf file format,
1926 * we need to keep a linked list of every threads pr_status and then create
1927 * a single section for them in the final core file.
1929 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1931 int sz = 0;
1932 struct task_struct *p = t->thread;
1933 t->num_notes = 0;
1935 fill_prstatus(&t->prstatus, p, signr);
1936 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1938 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1939 &(t->prstatus));
1940 t->num_notes++;
1941 sz += notesize(&t->notes[0]);
1943 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1944 &t->fpu))) {
1945 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1946 &(t->fpu));
1947 t->num_notes++;
1948 sz += notesize(&t->notes[1]);
1951 #ifdef ELF_CORE_COPY_XFPREGS
1952 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1953 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1954 sizeof(t->xfpu), &t->xfpu);
1955 t->num_notes++;
1956 sz += notesize(&t->notes[2]);
1958 #endif
1959 return sz;
1962 struct elf_note_info {
1963 struct memelfnote *notes;
1964 struct memelfnote *notes_files;
1965 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1966 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1967 struct list_head thread_list;
1968 elf_fpregset_t *fpu;
1969 #ifdef ELF_CORE_COPY_XFPREGS
1970 elf_fpxregset_t *xfpu;
1971 #endif
1972 user_siginfo_t csigdata;
1973 int thread_status_size;
1974 int numnote;
1977 static int elf_note_info_init(struct elf_note_info *info)
1979 memset(info, 0, sizeof(*info));
1980 INIT_LIST_HEAD(&info->thread_list);
1982 /* Allocate space for ELF notes */
1983 info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
1984 if (!info->notes)
1985 return 0;
1986 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1987 if (!info->psinfo)
1988 return 0;
1989 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1990 if (!info->prstatus)
1991 return 0;
1992 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1993 if (!info->fpu)
1994 return 0;
1995 #ifdef ELF_CORE_COPY_XFPREGS
1996 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
1997 if (!info->xfpu)
1998 return 0;
1999 #endif
2000 return 1;
2001 }
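/*
 * Note: on allocation failure the function above returns 0 without
 * unwinding; the partially-initialised buffers are released later by
 * free_note_info(), which elf_core_dump() calls on its cleanup path.
 */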
2003 static int fill_note_info(struct elfhdr *elf, int phdrs,
2004 struct elf_note_info *info,
2005 const siginfo_t *siginfo, struct pt_regs *regs)
2006 {
2007 struct list_head *t;
2008 struct core_thread *ct;
2009 struct elf_thread_status *ets;
2011 if (!elf_note_info_init(info))
2012 return 0;
2014 for (ct = current->mm->core_state->dumper.next;
2015 ct; ct = ct->next) {
2016 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2017 if (!ets)
2018 return 0;
2020 ets->thread = ct->task;
2021 list_add(&ets->list, &info->thread_list);
2022 }
2024 list_for_each(t, &info->thread_list) {
2025 int sz;
2027 ets = list_entry(t, struct elf_thread_status, list);
2028 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2029 info->thread_status_size += sz;
2030 }
2031 /* now collect the dump for the current */
2032 memset(info->prstatus, 0, sizeof(*info->prstatus));
2033 fill_prstatus(info->prstatus, current, siginfo->si_signo);
2034 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2036 /* Set up header */
2037 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2039 /*
2040 * Set up the notes in similar form to SVR4 core dumps made
2041 * with info from their /proc.
2042 */
2044 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2045 sizeof(*info->prstatus), info->prstatus);
2046 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2047 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2048 sizeof(*info->psinfo), info->psinfo);
2050 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2051 fill_auxv_note(info->notes + 3, current->mm);
2052 info->numnote = 4;
2054 if (fill_files_note(info->notes + info->numnote) == 0) {
2055 info->notes_files = info->notes + info->numnote;
2056 info->numnote++;
2057 }
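/*
 * fill_files_note() stores its (NT_FILE) payload in notes_files->data;
 * free_note_info() below releases that buffer with vfree() rather than
 * kfree().
 */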
2059 /* Try to dump the FPU. */
2060 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2061 info->fpu);
2062 if (info->prstatus->pr_fpvalid)
2063 fill_note(info->notes + info->numnote++,
2064 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2065 #ifdef ELF_CORE_COPY_XFPREGS
2066 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2067 fill_note(info->notes + info->numnote++,
2068 "LINUX", ELF_CORE_XFPREG_TYPE,
2069 sizeof(*info->xfpu), info->xfpu);
2070 #endif
2072 return 1;
2073 }
2075 static size_t get_note_info_size(struct elf_note_info *info)
2076 {
2077 int sz = 0;
2078 int i;
2080 for (i = 0; i < info->numnote; i++)
2081 sz += notesize(info->notes + i);
2083 sz += info->thread_status_size;
2085 return sz;
2086 }
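/*
 * The value returned here (plus elf_coredump_extra_notes_size()) becomes
 * the size of the single PT_NOTE segment that elf_core_dump() sets up via
 * fill_elf_note_phdr().
 */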
2088 static int write_note_info(struct elf_note_info *info,
2089 struct coredump_params *cprm)
2090 {
2091 int i;
2092 struct list_head *t;
2094 for (i = 0; i < info->numnote; i++)
2095 if (!writenote(info->notes + i, cprm))
2096 return 0;
2098 /* write out the thread status notes section */
2099 list_for_each(t, &info->thread_list) {
2100 struct elf_thread_status *tmp =
2101 list_entry(t, struct elf_thread_status, list);
2103 for (i = 0; i < tmp->num_notes; i++)
2104 if (!writenote(&tmp->notes[i], cprm))
2105 return 0;
2106 }
2108 return 1;
2109 }
2111 static void free_note_info(struct elf_note_info *info)
2112 {
2113 while (!list_empty(&info->thread_list)) {
2114 struct list_head *tmp = info->thread_list.next;
2115 list_del(tmp);
2116 kfree(list_entry(tmp, struct elf_thread_status, list));
2117 }
2119 /* Free data possibly allocated by fill_files_note(): */
2120 if (info->notes_files)
2121 vfree(info->notes_files->data);
2123 kfree(info->prstatus);
2124 kfree(info->psinfo);
2125 kfree(info->notes);
2126 kfree(info->fpu);
2127 #ifdef ELF_CORE_COPY_XFPREGS
2128 kfree(info->xfpu);
2129 #endif
2130 }
2132 #endif
2134 static struct vm_area_struct *first_vma(struct task_struct *tsk,
2135 struct vm_area_struct *gate_vma)
2136 {
2137 struct vm_area_struct *ret = tsk->mm->mmap;
2139 if (ret)
2140 return ret;
2141 return gate_vma;
2142 }
2143 /*
2144 * Helper function for iterating across a vma list. It ensures that the caller
2145 * will visit `gate_vma' prior to terminating the search.
2146 */
2147 static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2148 struct vm_area_struct *gate_vma)
2149 {
2150 struct vm_area_struct *ret;
2152 ret = this_vma->vm_next;
2153 if (ret)
2154 return ret;
2155 if (this_vma == gate_vma)
2156 return NULL;
2157 return gate_vma;
2158 }
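/*
 * Together, first_vma()/next_vma() walk mm->mmap and then visit gate_vma
 * exactly once (if one exists), so the gate mapping (e.g. the vsyscall
 * page on some architectures) is included in the dump as well.
 */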
2160 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2161 elf_addr_t e_shoff, int segs)
2162 {
2163 elf->e_shoff = e_shoff;
2164 elf->e_shentsize = sizeof(*shdr4extnum);
2165 elf->e_shnum = 1;
2166 elf->e_shstrndx = SHN_UNDEF;
2168 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2170 shdr4extnum->sh_type = SHT_NULL;
2171 shdr4extnum->sh_size = elf->e_shnum;
2172 shdr4extnum->sh_link = elf->e_shstrndx;
2173 shdr4extnum->sh_info = segs;
2174 }
2176 /*
2177 * Actual dumper
2178 *
2179 * This is a two-pass process; first we find the offsets of the bits,
2180 * and then they are actually written out. If we run out of core limit
2181 * we just truncate.
2182 */
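/*
 * Rough layout of the resulting file (offsets are computed in pass one,
 * data is emitted in pass two):
 *
 *   ELF header
 *   PT_NOTE program header, then one PT_LOAD header per dumped vma,
 *     then any arch-specific extra program headers
 *   note data (process-wide and per-thread notes)
 *   padding up to the next ELF_EXEC_PAGESIZE boundary (dataoff)
 *   vma contents, one page at a time
 *   arch-specific extra data
 *   extended-numbering section header, only when e_phnum == PN_XNUM
 */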
2183 static int elf_core_dump(struct coredump_params *cprm)
2184 {
2185 int has_dumped = 0;
2186 mm_segment_t fs;
2187 int segs, i;
2188 size_t vma_data_size = 0;
2189 struct vm_area_struct *vma, *gate_vma;
2190 struct elfhdr *elf = NULL;
2191 loff_t offset = 0, dataoff;
2192 struct elf_note_info info = { };
2193 struct elf_phdr *phdr4note = NULL;
2194 struct elf_shdr *shdr4extnum = NULL;
2195 Elf_Half e_phnum;
2196 elf_addr_t e_shoff;
2197 elf_addr_t *vma_filesz = NULL;
2199 /*
2200 * We no longer stop all VM operations.
2201 *
2202 * This is because those processes that could possibly change map_count
2203 * or the mmap / vma pages are now blocked in do_exit on current
2204 * finishing this core dump.
2205 *
2206 * Only ptrace can touch these memory addresses, but it doesn't change
2207 * the map_count or the pages allocated. So no possibility of crashing
2208 * exists while dumping the mm->vm_next areas to the core file.
2209 */
2211 /* alloc memory for large data structures: too large to be on stack */
2212 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2213 if (!elf)
2214 goto out;
2215 /*
2216 * The number of segs is recorded into the ELF header as a 16-bit value.
2217 * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify here.
2218 */
2219 segs = current->mm->map_count;
2220 segs += elf_core_extra_phdrs();
2222 gate_vma = get_gate_vma(current->mm);
2223 if (gate_vma != NULL)
2224 segs++;
2226 /* for notes section */
2227 segs++;
2229 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2230 * this, the kernel supports extended numbering. Have a look at
2231 * include/linux/elf.h for further information. */
2232 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
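/*
 * With extended numbering the real segment count is carried in sh_info of
 * the single SHT_NULL section header that fill_extnum_info() emits at the
 * end of the file; readers are expected to look there whenever
 * e_phnum == PN_XNUM.
 */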
2234 /*
2235 * Collect all the non-memory information about the process for the
2236 * notes. This also sets up the file header.
2237 */
2238 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
2239 goto cleanup;
2241 has_dumped = 1;
2243 fs = get_fs();
2244 set_fs(KERNEL_DS);
2246 offset += sizeof(*elf); /* Elf header */
2247 offset += segs * sizeof(struct elf_phdr); /* Program headers */
2249 /* Write notes phdr entry */
2250 {
2251 size_t sz = get_note_info_size(&info);
2253 sz += elf_coredump_extra_notes_size();
2255 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2256 if (!phdr4note)
2257 goto end_coredump;
2259 fill_elf_note_phdr(phdr4note, sz, offset);
2260 offset += sz;
2261 }
2263 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
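/*
 * Everything before dataoff is headers and notes; the memory image that
 * follows starts on an ELF_EXEC_PAGESIZE boundary so PT_LOAD contents
 * stay page-aligned in the file.
 */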
2265 vma_filesz = kmalloc_array(segs - 1, sizeof(*vma_filesz), GFP_KERNEL);
2266 if (!vma_filesz)
2267 goto end_coredump;
2269 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2270 vma = next_vma(vma, gate_vma)) {
2271 unsigned long dump_size;
2273 dump_size = vma_dump_size(vma, cprm->mm_flags);
2274 vma_filesz[i++] = dump_size;
2275 vma_data_size += dump_size;
2276 }
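/*
 * vma_filesz records the per-vma dump size decided here so that the
 * program-header pass and the data-writing pass below account for exactly
 * the same amount of data for each mapping.
 */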
2278 offset += vma_data_size;
2279 offset += elf_core_extra_data_size();
2280 e_shoff = offset;
2282 if (e_phnum == PN_XNUM) {
2283 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2284 if (!shdr4extnum)
2285 goto end_coredump;
2286 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2287 }
2289 offset = dataoff;
2291 if (!dump_emit(cprm, elf, sizeof(*elf)))
2292 goto end_coredump;
2294 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2295 goto end_coredump;
2297 /* Write program headers for segments dump */
2298 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2299 vma = next_vma(vma, gate_vma)) {
2300 struct elf_phdr phdr;
2302 phdr.p_type = PT_LOAD;
2303 phdr.p_offset = offset;
2304 phdr.p_vaddr = vma->vm_start;
2305 phdr.p_paddr = 0;
2306 phdr.p_filesz = vma_filesz[i++];
2307 phdr.p_memsz = vma->vm_end - vma->vm_start;
2308 offset += phdr.p_filesz;
2309 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
2310 if (vma->vm_flags & VM_WRITE)
2311 phdr.p_flags |= PF_W;
2312 if (vma->vm_flags & VM_EXEC)
2313 phdr.p_flags |= PF_X;
2314 phdr.p_align = ELF_EXEC_PAGESIZE;
2316 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2317 goto end_coredump;
2318 }
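/*
 * In the headers written above, p_memsz always spans the whole mapping,
 * while p_filesz is whatever vma_dump_size() decided to keep; a filtered
 * vma therefore has little or no backing data in the file.
 */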
2320 if (!elf_core_write_extra_phdrs(cprm, offset))
2321 goto end_coredump;
2323 /* write out the notes section */
2324 if (!write_note_info(&info, cprm))
2325 goto end_coredump;
2327 if (elf_coredump_extra_notes_write(cprm))
2328 goto end_coredump;
2330 /* Align to page */
2331 if (!dump_skip(cprm, dataoff - cprm->written))
2332 goto end_coredump;
2334 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2335 vma = next_vma(vma, gate_vma)) {
2336 unsigned long addr;
2337 unsigned long end;
2339 end = vma->vm_start + vma_filesz[i++];
2341 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
2342 struct page *page;
2343 int stop;
2345 page = get_dump_page(addr);
2346 if (page) {
2347 void *kaddr = kmap(page);
2348 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2349 kunmap(page);
2350 page_cache_release(page);
2351 } else
2352 stop = !dump_skip(cprm, PAGE_SIZE);
2353 if (stop)
2354 goto end_coredump;
2355 }
2356 }
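/*
 * Pages that cannot be grabbed with get_dump_page() are skipped with
 * dump_skip(), which (roughly) leaves a hole in the output, or zero-fills
 * on targets that cannot seek, rather than aborting the dump.
 */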
2357 dump_truncate(cprm);
2359 if (!elf_core_write_extra_data(cprm))
2360 goto end_coredump;
2362 if (e_phnum == PN_XNUM) {
2363 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2364 goto end_coredump;
2365 }
2367 end_coredump:
2368 set_fs(fs);
2370 cleanup:
2371 free_note_info(&info);
2372 kfree(shdr4extnum);
2373 kfree(vma_filesz);
2374 kfree(phdr4note);
2375 kfree(elf);
2376 out:
2377 return has_dumped;
2378 }
2380 #endif /* CONFIG_ELF_CORE */
2382 static int __init init_elf_binfmt(void)
2383 {
2384 register_binfmt(&elf_format);
2385 return 0;
2386 }
2388 static void __exit exit_elf_binfmt(void)
2389 {
2390 /* Remove the COFF and ELF loaders. */
2391 unregister_binfmt(&elf_format);
2392 }
2394 core_initcall(init_elf_binfmt);
2395 module_exit(exit_elf_binfmt);
2396 MODULE_LICENSE("GPL");