/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <linux/kmod.h>
int core_uses_pid;
char core_pattern[65] = "core";
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
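
/*
 * Registered binary format handlers live on the singly linked list
 * headed by "formats"; binfmt_lock protects the list for the
 * register/unregister paths below and for the readers in
 * sys_uselib() and search_binary_handler().
 */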
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);
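
/*
 * A format handler normally registers itself from its module init
 * routine. A minimal sketch (the handler name and load routine are
 * illustrative, not part of this file):
 *
 *	static struct linux_binfmt foo_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_foo_binary,
 *	};
 *
 *	static int __init init_foo_binfmt(void)
 *	{
 *		return register_binfmt(&foo_format);
 *	}
 */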
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from, from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	nd.intent.open.flags = FMODE_READ;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}
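
/*
 * Note the pattern in the loop above: try_module_get() pins the
 * handler so binfmt_lock can be dropped around the (possibly
 * sleeping) ->load_shlib() callback, and put_binfmt() drops the
 * reference once the lock is reacquired.
 */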
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc, char __user * __user * argv, struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len)  {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
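
/*
 * Resulting layout: strings are packed downward from the end of the
 * MAX_ARG_PAGES temporary pages, with bprm->p left at the lowest byte
 * in use. For example, after copying argv = { "ls", "-l" } the tail
 * of the last page holds "ls\0-l\0" and bprm->p points at the 'l'
 * of "ls".
 */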
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
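
/*
 * The get_fs()/set_fs(KERNEL_DS) switch above is what allows
 * copy_strings(), which uses copy_from_user(), to accept the kernel
 * pointers passed in here; do_execve() uses this to copy
 * bprm->filename into the argument pages.
 */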
#ifdef CONFIG_MMU

/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (unlikely(anon_vma_prepare(vma)))
		goto out_sig;

	flush_dcache_page(page);
	pgd = pgd_offset(mm, address);

	spin_lock(&mm->page_table_lock);
	pmd = pmd_alloc(mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	mm->rss++;
	lru_cache_add_active(page);
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, current);
}
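
/*
 * Note that every failure path above ends in force_sig(SIGKILL): by
 * the time install_arg_page() runs, the old address space has already
 * been dropped, so there is nothing useful to return an error to.
 */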
int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = PAGE_SIZE * i - offset;

	/* Limit stack size to 1GB */
	stack_base = current->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(STACK_TOP - stack_base);

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
	mm->arg_start = bprm->p + stack_base;
	arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = PAGE_MASK &
			(PAGE_SIZE - 1 + (unsigned long) bprm->p);
#else
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = STACK_TOP;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		insert_vm_struct(mm, mpnt);
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
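
/*
 * A binfmt handler calls setup_arg_pages() after flush_old_exec(),
 * once the new mm is live. On return, bprm->p has been relocated
 * from an offset within the temporary MAX_ARG_PAGES pages to a real
 * user stack address, and the pages themselves belong to the new
 * stack VMA instead of bprm.
 */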
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	nd.intent.open.flags = FMODE_READ;
	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
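
/*
 * The deny_write_access() above is what makes writes to a file that
 * is being executed fail with ETXTBSY; the exec paths below balance
 * it with allow_write_access() once they are done with the file.
 */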
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
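
/*
 * kernel_read() is how binfmt handlers pull program headers and
 * interpreter paths out of the image. A typical call (sketch, the
 * buffer is illustrative):
 *
 *	char buf[BINPRM_BUF_SIZE];
 *	int err = kernel_read(bprm->file, 0, buf, sizeof(buf));
 */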
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Add it to the list of mm's */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &init_mm.mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		if (active_mm != old_mm) BUG();
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
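
/*
 * Note the two drop paths above: a normal process owned old_mm and
 * releases it with mmput(); a kernel thread that called exec had only
 * borrowed its active_mm, which is dropped with mmdrop().
 */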
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *newsig, *oldsig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1)
		return 0;

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	spin_lock_init(&newsighand->siglock);
	atomic_set(&newsighand->count, 1);
	memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));

	/*
	 * See if we need to allocate a new signal structure
	 */
	newsig = NULL;
	if (atomic_read(&oldsig->count) > 1) {
		newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
		if (!newsig) {
			kmem_cache_free(sighand_cachep, newsighand);
			return -ENOMEM;
		}
		atomic_set(&newsig->count, 1);
		newsig->group_exit = 0;
		newsig->group_exit_code = 0;
		newsig->group_exit_task = NULL;
		newsig->group_stop_count = 0;
		newsig->curr_target = NULL;
		init_sigpending(&newsig->shared_pending);
		INIT_LIST_HEAD(&newsig->posix_timers);

		newsig->tty = oldsig->tty;
		newsig->pgrp = oldsig->pgrp;
		newsig->session = oldsig->session;
		newsig->leader = oldsig->leader;
		newsig->tty_old_pgrp = oldsig->tty_old_pgrp;
	}

	if (thread_group_empty(current))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (oldsig->group_exit) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		if (newsig)
			kmem_cache_free(signal_cachep, newsig);
		return -EAGAIN;
	}
	oldsig->group_exit = 1;
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (current->pid == current->tgid)
		count = 1;
	while (atomic_read(&oldsig->count) > count) {
		oldsig->group_exit_task = current;
		oldsig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (current->pid != current->tgid) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->state != TASK_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		if (leader->tgid != current->tgid)
			BUG();
		if (current->pid == current->tgid)
			BUG();
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		state = leader->state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		if (state != TASK_ZOMBIE)
			BUG();
		release_task(leader);
	}

no_thread_group:

	write_lock_irq(&tasklist_lock);
	spin_lock(&oldsighand->siglock);
	spin_lock(&newsighand->siglock);

	if (current == oldsig->curr_target)
		oldsig->curr_target = next_thread(current);
	if (newsig)
		current->signal = newsig;
	current->sighand = newsighand;
	init_sigpending(&current->pending);
	recalc_sigpending();

	spin_unlock(&newsighand->siglock);
	spin_unlock(&oldsighand->siglock);
	write_unlock_irq(&tasklist_lock);

	if (newsig && atomic_dec_and_test(&oldsig->count)) {
		exit_itimers(oldsig);
		kmem_cache_free(signal_cachep, oldsig);
	}

	if (atomic_dec_and_test(&oldsighand->count))
		kmem_cache_free(sighand_cachep, oldsighand);

	if (!thread_group_empty(current))
		BUG();
	if (current->tgid != current->pid)
		BUG();
	return 0;
}
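
/*
 * Net effect of de_thread(): the exec'ing task is the only thread
 * left, it runs under the old thread group's TGID as its PID, and it
 * owns private signal/sighand structures that flush_signal_handlers()
 * can reset without disturbing anyone else.
 */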
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	memcpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;
	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	steal_locks(files);
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	name = bprm->filename;
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	flush_thread();

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
		current->mm->dumpable = 0;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
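
/*
 * Everything above the "point of no return" in flush_old_exec() may
 * fail and leave the old image runnable; once exec_mmap() succeeds
 * the old mm is gone, and later failures must kill the process
 * instead of returning to it.
 */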
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	int retval;
	struct inode * inode = bprm->file->f_dentry->d_inode;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * vfs_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf,0,BINPRM_BUF_SIZE);
	return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
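
/*
 * After prepare_binprm() returns, bprm->buf holds the first
 * BINPRM_BUF_SIZE (128) bytes of the image; that is all the data the
 * handlers probed by search_binary_handler() get to look at when
 * checking for "#!", ELF magic and the like.
 */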
static inline int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;
	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
}

EXPORT_SYMBOL(compute_creds);
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
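
/*
 * The two passes of the outer loop above exist for CONFIG_KMOD: if no
 * currently registered handler recognizes the image and its first
 * bytes do not look like plain text, pass one asks modprobe for a
 * "binfmt-XXXX" module and pass two rescans the (possibly longer)
 * format list.
 */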
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	sched_balance_exec();

	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
	retval = -ENOMEM;
	if (!bprm) {
		allow_write_access(file);
		fput(file);
		return retval;
	}
	memset(bprm, 0, sizeof(*bprm));

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm,regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages*/
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	kfree(bprm);
	return retval;
}

EXPORT_SYMBOL(do_execve);
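
/*
 * Call order inside do_execve(), for reference: open_exec() pins the
 * file, count() and copy_strings() stage argv/envp into the bprm
 * pages, prepare_binprm() reads the header and computes e_uid/e_gid,
 * and search_binary_handler() hands the whole bprm to whichever
 * format claims it.
 */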
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
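
/*
 * Example expansions, consistent with the cases above: with
 * core_pattern "core.%e.%p" a crash of "inetd" as pid 42 produces
 * "core.inetd.42", while the default pattern "core" with
 * core_uses_pid set yields "core.42" via the pid_in_pattern fallback.
 */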
static void zap_threads (struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
		}
	while_each_thread(g,p);
	read_unlock(&tasklist_lock);
}
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	current->signal->group_exit = 1;
	current->signal->group_exit_code = exit_code;
	coredump_wait(mm);

	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	complete_all(&mm->core_done);
fail:
	return retval;
}