1 /*
2 * linux/kernel/fork.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
7 /*
8 * 'fork.c' contains the help-routines for the 'fork' system call
9 * (see also entry.S and others).
10 * Fork is rather simple, once you get the hang of it, but the memory
11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12 */
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/mnt_namespace.h>
21 #include <linux/personality.h>
22 #include <linux/mempolicy.h>
23 #include <linux/sem.h>
24 #include <linux/file.h>
25 #include <linux/key.h>
26 #include <linux/binfmts.h>
27 #include <linux/mman.h>
28 #include <linux/fs.h>
29 #include <linux/nsproxy.h>
30 #include <linux/capability.h>
31 #include <linux/cpu.h>
32 #include <linux/cpuset.h>
33 #include <linux/security.h>
34 #include <linux/swap.h>
35 #include <linux/syscalls.h>
36 #include <linux/jiffies.h>
37 #include <linux/futex.h>
38 #include <linux/task_io_accounting_ops.h>
39 #include <linux/rcupdate.h>
40 #include <linux/ptrace.h>
41 #include <linux/mount.h>
42 #include <linux/audit.h>
43 #include <linux/profile.h>
44 #include <linux/rmap.h>
45 #include <linux/acct.h>
46 #include <linux/tsacct_kern.h>
47 #include <linux/cn_proc.h>
48 #include <linux/freezer.h>
49 #include <linux/delayacct.h>
50 #include <linux/taskstats_kern.h>
51 #include <linux/random.h>
52 #include <linux/tty.h>
54 #include <asm/pgtable.h>
55 #include <asm/pgalloc.h>
56 #include <asm/uaccess.h>
57 #include <asm/mmu_context.h>
58 #include <asm/cacheflush.h>
59 #include <asm/tlbflush.h>
61 /*
62 * Protected counters by write_lock_irq(&tasklist_lock)
63 */
64 unsigned long total_forks; /* Handle normal Linux uptimes. */
65 int nr_threads; /* The idle threads do not count.. */
67 int max_threads; /* tunable limit on nr_threads */
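/*
 * process_counts is a per-cpu count of the thread group leaders resident
 * on each CPU; it is bumped further down in copy_process() when a new
 * group leader is added to the task list, and summed by nr_processes().
 */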
69 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
71 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
73 int nr_processes(void)
74 {
75 int cpu;
76 int total = 0;
78 for_each_online_cpu(cpu)
79 total += per_cpu(process_counts, cpu);
81 return total;
82 }
84 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
85 # define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
86 # define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
87 static struct kmem_cache *task_struct_cachep;
88 #endif
90 /* SLAB cache for signal_struct structures (tsk->signal) */
91 static struct kmem_cache *signal_cachep;
93 /* SLAB cache for sighand_struct structures (tsk->sighand) */
94 struct kmem_cache *sighand_cachep;
96 /* SLAB cache for files_struct structures (tsk->files) */
97 struct kmem_cache *files_cachep;
99 /* SLAB cache for fs_struct structures (tsk->fs) */
100 struct kmem_cache *fs_cachep;
102 /* SLAB cache for vm_area_struct structures */
103 struct kmem_cache *vm_area_cachep;
105 /* SLAB cache for mm_struct structures (tsk->mm) */
106 static struct kmem_cache *mm_cachep;
108 void free_task(struct task_struct *tsk)
110 prop_local_destroy_single(&tsk->dirties);
111 free_thread_info(tsk->stack);
112 rt_mutex_debug_task_free(tsk);
113 free_task_struct(tsk);
115 EXPORT_SYMBOL(free_task);
117 void __put_task_struct(struct task_struct *tsk)
119 WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
120 WARN_ON(atomic_read(&tsk->usage));
121 WARN_ON(tsk == current);
123 security_task_free(tsk);
124 free_uid(tsk->user);
125 put_group_info(tsk->group_info);
126 delayacct_tsk_free(tsk);
128 if (!profile_handoff_task(tsk))
129 free_task(tsk);
132 void __init fork_init(unsigned long mempages)
134 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
135 #ifndef ARCH_MIN_TASKALIGN
136 #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
137 #endif
138 /* create a slab on which task_structs can be allocated */
139 task_struct_cachep =
140 kmem_cache_create("task_struct", sizeof(struct task_struct),
141 ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
142 #endif
144 /*
145 * The default maximum number of threads is set to a safe
146 * value: the thread structures can take up at most half
147 * of memory.
148 */
149 max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
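/*
 * Worked example (illustrative values only): with 4GB of 4K pages
 * (mempages = 1048576) and THREAD_SIZE = 8K, this yields
 * max_threads = 1048576 / (8 * 2) = 65536, so the kernel stacks
 * alone can consume at most 65536 * 8K = 512MB.
 */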
151 /*
152 * we need to allow at least 20 threads to boot a system
153 */
154 if(max_threads < 20)
155 max_threads = 20;
157 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
158 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
159 init_task.signal->rlim[RLIMIT_SIGPENDING] =
160 init_task.signal->rlim[RLIMIT_NPROC];
163 static struct task_struct *dup_task_struct(struct task_struct *orig)
165 struct task_struct *tsk;
166 struct thread_info *ti;
167 int err;
169 prepare_to_copy(orig);
171 tsk = alloc_task_struct();
172 if (!tsk)
173 return NULL;
175 ti = alloc_thread_info(tsk);
176 if (!ti) {
177 free_task_struct(tsk);
178 return NULL;
181 *tsk = *orig;
182 tsk->stack = ti;
184 err = prop_local_init_single(&tsk->dirties);
185 if (err) {
186 free_thread_info(ti);
187 free_task_struct(tsk);
188 return NULL;
191 setup_thread_stack(tsk, orig);
193 #ifdef CONFIG_CC_STACKPROTECTOR
194 tsk->stack_canary = get_random_int();
195 #endif
197 /* One for us, one for whoever does the "release_task()" (usually parent) */
198 atomic_set(&tsk->usage,2);
199 atomic_set(&tsk->fs_excl, 0);
200 #ifdef CONFIG_BLK_DEV_IO_TRACE
201 tsk->btrace_seq = 0;
202 #endif
203 tsk->splice_pipe = NULL;
204 return tsk;
207 #ifdef CONFIG_MMU
208 static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
210 struct vm_area_struct *mpnt, *tmp, **pprev;
211 struct rb_node **rb_link, *rb_parent;
212 int retval;
213 unsigned long charge;
214 struct mempolicy *pol;
216 down_write(&oldmm->mmap_sem);
217 flush_cache_dup_mm(oldmm);
218 /*
219 * Not linked in yet - no deadlock potential:
220 */
221 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
223 mm->locked_vm = 0;
224 mm->mmap = NULL;
225 mm->mmap_cache = NULL;
226 mm->free_area_cache = oldmm->mmap_base;
227 mm->cached_hole_size = ~0UL;
228 mm->map_count = 0;
229 cpus_clear(mm->cpu_vm_mask);
230 mm->mm_rb = RB_ROOT;
231 rb_link = &mm->mm_rb.rb_node;
232 rb_parent = NULL;
233 pprev = &mm->mmap;
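/*
 * Walk the parent's vma list: duplicate each vma (skipping VM_DONTCOPY
 * ones), charge VM_ACCOUNT mappings, link the copy into the child's
 * list, file prio tree and rbtree, and copy its page tables.
 */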
235 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
236 struct file *file;
238 if (mpnt->vm_flags & VM_DONTCOPY) {
239 long pages = vma_pages(mpnt);
240 mm->total_vm -= pages;
241 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
242 -pages);
243 continue;
245 charge = 0;
246 if (mpnt->vm_flags & VM_ACCOUNT) {
247 unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
248 if (security_vm_enough_memory(len))
249 goto fail_nomem;
250 charge = len;
252 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
253 if (!tmp)
254 goto fail_nomem;
255 *tmp = *mpnt;
256 pol = mpol_copy(vma_policy(mpnt));
257 retval = PTR_ERR(pol);
258 if (IS_ERR(pol))
259 goto fail_nomem_policy;
260 vma_set_policy(tmp, pol);
261 tmp->vm_flags &= ~VM_LOCKED;
262 tmp->vm_mm = mm;
263 tmp->vm_next = NULL;
264 anon_vma_link(tmp);
265 file = tmp->vm_file;
266 if (file) {
267 struct inode *inode = file->f_path.dentry->d_inode;
268 get_file(file);
269 if (tmp->vm_flags & VM_DENYWRITE)
270 atomic_dec(&inode->i_writecount);
272 /* insert tmp into the share list, just after mpnt */
273 spin_lock(&file->f_mapping->i_mmap_lock);
274 tmp->vm_truncate_count = mpnt->vm_truncate_count;
275 flush_dcache_mmap_lock(file->f_mapping);
276 vma_prio_tree_add(tmp, mpnt);
277 flush_dcache_mmap_unlock(file->f_mapping);
278 spin_unlock(&file->f_mapping->i_mmap_lock);
282 * Link in the new vma and copy the page table entries.
284 *pprev = tmp;
285 pprev = &tmp->vm_next;
287 __vma_link_rb(mm, tmp, rb_link, rb_parent);
288 rb_link = &tmp->vm_rb.rb_right;
289 rb_parent = &tmp->vm_rb;
291 mm->map_count++;
292 retval = copy_page_range(mm, oldmm, mpnt);
294 if (tmp->vm_ops && tmp->vm_ops->open)
295 tmp->vm_ops->open(tmp);
297 if (retval)
298 goto out;
300 /* a new mm has just been created */
301 arch_dup_mmap(oldmm, mm);
302 retval = 0;
303 out:
304 up_write(&mm->mmap_sem);
305 flush_tlb_mm(oldmm);
306 up_write(&oldmm->mmap_sem);
307 return retval;
308 fail_nomem_policy:
309 kmem_cache_free(vm_area_cachep, tmp);
310 fail_nomem:
311 retval = -ENOMEM;
312 vm_unacct_memory(charge);
313 goto out;
316 static inline int mm_alloc_pgd(struct mm_struct * mm)
318 mm->pgd = pgd_alloc(mm);
319 if (unlikely(!mm->pgd))
320 return -ENOMEM;
321 return 0;
324 static inline void mm_free_pgd(struct mm_struct * mm)
326 pgd_free(mm->pgd);
328 #else
329 #define dup_mmap(mm, oldmm) (0)
330 #define mm_alloc_pgd(mm) (0)
331 #define mm_free_pgd(mm)
332 #endif /* CONFIG_MMU */
334 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
336 #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
337 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
339 #include <linux/init_task.h>
341 static struct mm_struct * mm_init(struct mm_struct * mm)
343 atomic_set(&mm->mm_users, 1);
344 atomic_set(&mm->mm_count, 1);
345 init_rwsem(&mm->mmap_sem);
346 INIT_LIST_HEAD(&mm->mmlist);
347 mm->flags = (current->mm) ? current->mm->flags
348 : MMF_DUMP_FILTER_DEFAULT;
349 mm->core_waiters = 0;
350 mm->nr_ptes = 0;
351 set_mm_counter(mm, file_rss, 0);
352 set_mm_counter(mm, anon_rss, 0);
353 spin_lock_init(&mm->page_table_lock);
354 rwlock_init(&mm->ioctx_list_lock);
355 mm->ioctx_list = NULL;
356 mm->free_area_cache = TASK_UNMAPPED_BASE;
357 mm->cached_hole_size = ~0UL;
359 if (likely(!mm_alloc_pgd(mm))) {
360 mm->def_flags = 0;
361 return mm;
363 free_mm(mm);
364 return NULL;
367 /*
368 * Allocate and initialize an mm_struct.
369 */
370 struct mm_struct * mm_alloc(void)
372 struct mm_struct * mm;
374 mm = allocate_mm();
375 if (mm) {
376 memset(mm, 0, sizeof(*mm));
377 mm = mm_init(mm);
379 return mm;
382 /*
383 * Called when the last reference to the mm
384 * is dropped: either by a lazy thread or by
385 * mmput. Free the page directory and the mm.
386 */
387 void fastcall __mmdrop(struct mm_struct *mm)
389 BUG_ON(mm == &init_mm);
390 mm_free_pgd(mm);
391 destroy_context(mm);
392 free_mm(mm);
395 /*
396 * Decrement the use count and release all resources for an mm.
397 */
398 void mmput(struct mm_struct *mm)
400 might_sleep();
402 if (atomic_dec_and_test(&mm->mm_users)) {
403 exit_aio(mm);
404 exit_mmap(mm);
405 if (!list_empty(&mm->mmlist)) {
406 spin_lock(&mmlist_lock);
407 list_del(&mm->mmlist);
408 spin_unlock(&mmlist_lock);
410 put_swap_token(mm);
411 mmdrop(mm);
414 EXPORT_SYMBOL_GPL(mmput);
416 /**
417 * get_task_mm - acquire a reference to the task's mm
418 *
419 * Returns %NULL if the task has no mm or if its PF_BORROWED_MM flag is set
420 * (meaning this kernel workthread has transiently adopted a user mm with
421 * use_mm, to do its AIO); otherwise returns the mm after bumping up its
422 * use count. The caller must release the mm via mmput() after use.
423 * Typically used by /proc and ptrace.
424 */
425 struct mm_struct *get_task_mm(struct task_struct *task)
427 struct mm_struct *mm;
429 task_lock(task);
430 mm = task->mm;
431 if (mm) {
432 if (task->flags & PF_BORROWED_MM)
433 mm = NULL;
434 else
435 atomic_inc(&mm->mm_users);
437 task_unlock(task);
438 return mm;
440 EXPORT_SYMBOL_GPL(get_task_mm);
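/*
 * Typical caller pattern (a sketch only - the task_struct pointer and the
 * work done with the mm are placeholders):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... inspect mm->total_vm, walk mm->mmap, etc. ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */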
442 /* Please note the differences between mmput and mm_release.
443 * mmput is called whenever we stop holding onto a mm_struct,
444 * whether on error or on success.
445 *
446 * mm_release is called after a mm_struct has been removed
447 * from the current process.
448 *
449 * This difference is important for error handling, when we
450 * only half set up a mm_struct for a new process and need to restore
451 * the old one. Because we mmput the new mm_struct before
452 * restoring the old one. . .
453 * Eric Biederman 10 January 1998
454 */
455 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
457 struct completion *vfork_done = tsk->vfork_done;
459 /* Get rid of any cached register state */
460 deactivate_mm(tsk, mm);
462 /* notify parent sleeping on vfork() */
463 if (vfork_done) {
464 tsk->vfork_done = NULL;
465 complete(vfork_done);
468 /*
469 * If we're exiting normally, clear a user-space tid field if
470 * requested. We leave this alone when dying by signal, to leave
471 * the value intact in a core dump, and to save the unnecessary
472 * trouble otherwise. Userland only wants this done for a sys_exit.
473 */
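/*
 * The CLONE_CHILD_CLEARTID + FUTEX_WAKE sequence below is what thread
 * libraries such as NPTL rely on: pthread_join() sleeps in FUTEX_WAIT
 * on the thread's tid field and is woken here once the kernel has
 * zeroed that field on thread exit.
 */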
474 if (tsk->clear_child_tid
475 && !(tsk->flags & PF_SIGNALED)
476 && atomic_read(&mm->mm_users) > 1) {
477 u32 __user * tidptr = tsk->clear_child_tid;
478 tsk->clear_child_tid = NULL;
481 * We don't check the error code - if userspace has
482 * not set up a proper pointer then tough luck.
484 put_user(0, tidptr);
485 sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
489 /*
490 * Allocate a new mm structure and copy contents from the
491 * mm structure of the passed in task structure.
492 */
493 static struct mm_struct *dup_mm(struct task_struct *tsk)
495 struct mm_struct *mm, *oldmm = current->mm;
496 int err;
498 if (!oldmm)
499 return NULL;
501 mm = allocate_mm();
502 if (!mm)
503 goto fail_nomem;
505 memcpy(mm, oldmm, sizeof(*mm));
507 /* Initializing for Swap token stuff */
508 mm->token_priority = 0;
509 mm->last_interval = 0;
511 if (!mm_init(mm))
512 goto fail_nomem;
514 if (init_new_context(tsk, mm))
515 goto fail_nocontext;
517 err = dup_mmap(mm, oldmm);
518 if (err)
519 goto free_pt;
521 mm->hiwater_rss = get_mm_rss(mm);
522 mm->hiwater_vm = mm->total_vm;
524 return mm;
526 free_pt:
527 mmput(mm);
529 fail_nomem:
530 return NULL;
532 fail_nocontext:
534 * If init_new_context() failed, we cannot use mmput() to free the mm
535 * because it calls destroy_context()
537 mm_free_pgd(mm);
538 free_mm(mm);
539 return NULL;
542 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
544 struct mm_struct * mm, *oldmm;
545 int retval;
547 tsk->min_flt = tsk->maj_flt = 0;
548 tsk->nvcsw = tsk->nivcsw = 0;
550 tsk->mm = NULL;
551 tsk->active_mm = NULL;
553 /*
554 * Are we cloning a kernel thread?
555 *
556 * We need to steal an active VM for that..
557 */
558 oldmm = current->mm;
559 if (!oldmm)
560 return 0;
562 if (clone_flags & CLONE_VM) {
563 atomic_inc(&oldmm->mm_users);
564 mm = oldmm;
565 goto good_mm;
568 retval = -ENOMEM;
569 mm = dup_mm(tsk);
570 if (!mm)
571 goto fail_nomem;
573 good_mm:
574 /* Initializing for Swap token stuff */
575 mm->token_priority = 0;
576 mm->last_interval = 0;
578 tsk->mm = mm;
579 tsk->active_mm = mm;
580 return 0;
582 fail_nomem:
583 return retval;
586 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
588 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
589 /* We don't need to lock fs - think why ;-) */
590 if (fs) {
591 atomic_set(&fs->count, 1);
592 rwlock_init(&fs->lock);
593 fs->umask = old->umask;
594 read_lock(&old->lock);
595 fs->rootmnt = mntget(old->rootmnt);
596 fs->root = dget(old->root);
597 fs->pwdmnt = mntget(old->pwdmnt);
598 fs->pwd = dget(old->pwd);
599 if (old->altroot) {
600 fs->altrootmnt = mntget(old->altrootmnt);
601 fs->altroot = dget(old->altroot);
602 } else {
603 fs->altrootmnt = NULL;
604 fs->altroot = NULL;
606 read_unlock(&old->lock);
608 return fs;
611 struct fs_struct *copy_fs_struct(struct fs_struct *old)
613 return __copy_fs_struct(old);
616 EXPORT_SYMBOL_GPL(copy_fs_struct);
618 static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
620 if (clone_flags & CLONE_FS) {
621 atomic_inc(&current->fs->count);
622 return 0;
624 tsk->fs = __copy_fs_struct(current->fs);
625 if (!tsk->fs)
626 return -ENOMEM;
627 return 0;
630 static int count_open_files(struct fdtable *fdt)
632 int size = fdt->max_fds;
633 int i;
635 /* Find the last open fd */
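/*
 * The scan below walks the open-fds bitmap a word at a time from the
 * top and returns the count rounded up to a whole word. Example
 * (assuming 64-bit longs and max_fds = 256): if the highest open fd is
 * 70, the loop stops at word index 1 and the function returns
 * (1 + 1) * 64 = 128.
 */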
636 for (i = size/(8*sizeof(long)); i > 0; ) {
637 if (fdt->open_fds->fds_bits[--i])
638 break;
640 i = (i+1) * 8 * sizeof(long);
641 return i;
644 static struct files_struct *alloc_files(void)
646 struct files_struct *newf;
647 struct fdtable *fdt;
649 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
650 if (!newf)
651 goto out;
653 atomic_set(&newf->count, 1);
655 spin_lock_init(&newf->file_lock);
656 newf->next_fd = 0;
657 fdt = &newf->fdtab;
658 fdt->max_fds = NR_OPEN_DEFAULT;
659 fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
660 fdt->open_fds = (fd_set *)&newf->open_fds_init;
661 fdt->fd = &newf->fd_array[0];
662 INIT_RCU_HEAD(&fdt->rcu);
663 fdt->next = NULL;
664 rcu_assign_pointer(newf->fdt, fdt);
665 out:
666 return newf;
669 /*
670 * Allocate a new files structure and copy contents from the
671 * passed in files structure.
672 * errorp will be valid only when the returned files_struct is NULL.
673 */
674 static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
676 struct files_struct *newf;
677 struct file **old_fds, **new_fds;
678 int open_files, size, i;
679 struct fdtable *old_fdt, *new_fdt;
681 *errorp = -ENOMEM;
682 newf = alloc_files();
683 if (!newf)
684 goto out;
686 spin_lock(&oldf->file_lock);
687 old_fdt = files_fdtable(oldf);
688 new_fdt = files_fdtable(newf);
689 open_files = count_open_files(old_fdt);
692 * Check whether we need to allocate a larger fd array and fd set.
693 * Note: we're not a clone task, so the open count won't change.
695 if (open_files > new_fdt->max_fds) {
696 new_fdt->max_fds = 0;
697 spin_unlock(&oldf->file_lock);
698 spin_lock(&newf->file_lock);
699 *errorp = expand_files(newf, open_files-1);
700 spin_unlock(&newf->file_lock);
701 if (*errorp < 0)
702 goto out_release;
703 new_fdt = files_fdtable(newf);
704 /*
705 * Reacquire the oldf lock and a fresh pointer to its fd table;
706 * it may have grown a new, bigger fd table in the meantime, and
707 * we need the latest pointer.
708 */
709 spin_lock(&oldf->file_lock);
710 old_fdt = files_fdtable(oldf);
713 old_fds = old_fdt->fd;
714 new_fds = new_fdt->fd;
716 memcpy(new_fdt->open_fds->fds_bits,
717 old_fdt->open_fds->fds_bits, open_files/8);
718 memcpy(new_fdt->close_on_exec->fds_bits,
719 old_fdt->close_on_exec->fds_bits, open_files/8);
721 for (i = open_files; i != 0; i--) {
722 struct file *f = *old_fds++;
723 if (f) {
724 get_file(f);
725 } else {
727 * The fd may be claimed in the fd bitmap but not yet
728 * instantiated in the files array if a sibling thread
729 * is partway through open(). So make sure that this
730 * fd is available to the new process.
732 FD_CLR(open_files - i, new_fdt->open_fds);
734 rcu_assign_pointer(*new_fds++, f);
736 spin_unlock(&oldf->file_lock);
738 /* compute the remainder to be cleared */
739 size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
741 /* This is long word aligned thus could use a optimized version */
742 memset(new_fds, 0, size);
744 if (new_fdt->max_fds > open_files) {
745 int left = (new_fdt->max_fds-open_files)/8;
746 int start = open_files / (8 * sizeof(unsigned long));
748 memset(&new_fdt->open_fds->fds_bits[start], 0, left);
749 memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
752 return newf;
754 out_release:
755 kmem_cache_free(files_cachep, newf);
756 out:
757 return NULL;
760 static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
762 struct files_struct *oldf, *newf;
763 int error = 0;
766 * A background process may not have any files ...
768 oldf = current->files;
769 if (!oldf)
770 goto out;
772 if (clone_flags & CLONE_FILES) {
773 atomic_inc(&oldf->count);
774 goto out;
777 /*
778 * Note: we may be using current for both targets (See exec.c)
779 * This works because we cache current->files (old) as oldf. Don't
780 * break this.
781 */
782 tsk->files = NULL;
783 newf = dup_fd(oldf, &error);
784 if (!newf)
785 goto out;
787 tsk->files = newf;
788 error = 0;
789 out:
790 return error;
793 /*
794 * Helper to unshare the files of the current task.
795 * We don't want to expose copy_files internals to
796 * the exec layer of the kernel.
797 */
799 int unshare_files(void)
801 struct files_struct *files = current->files;
802 int rc;
804 BUG_ON(!files);
806 /* This can race but the race causes us to copy when we don't
807 need to and drop the copy */
808 if(atomic_read(&files->count) == 1)
810 atomic_inc(&files->count);
811 return 0;
813 rc = copy_files(0, current);
814 if(rc)
815 current->files = files;
816 return rc;
819 EXPORT_SYMBOL(unshare_files);
821 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
823 struct sighand_struct *sig;
825 if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
826 atomic_inc(&current->sighand->count);
827 return 0;
829 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
830 rcu_assign_pointer(tsk->sighand, sig);
831 if (!sig)
832 return -ENOMEM;
833 atomic_set(&sig->count, 1);
834 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
835 return 0;
838 void __cleanup_sighand(struct sighand_struct *sighand)
840 if (atomic_dec_and_test(&sighand->count))
841 kmem_cache_free(sighand_cachep, sighand);
844 static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
846 struct signal_struct *sig;
847 int ret;
849 if (clone_flags & CLONE_THREAD) {
850 atomic_inc(&current->signal->count);
851 atomic_inc(&current->signal->live);
852 return 0;
854 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
855 tsk->signal = sig;
856 if (!sig)
857 return -ENOMEM;
859 ret = copy_thread_group_keys(tsk);
860 if (ret < 0) {
861 kmem_cache_free(signal_cachep, sig);
862 return ret;
865 atomic_set(&sig->count, 1);
866 atomic_set(&sig->live, 1);
867 init_waitqueue_head(&sig->wait_chldexit);
868 sig->flags = 0;
869 sig->group_exit_code = 0;
870 sig->group_exit_task = NULL;
871 sig->group_stop_count = 0;
872 sig->curr_target = NULL;
873 init_sigpending(&sig->shared_pending);
874 INIT_LIST_HEAD(&sig->posix_timers);
876 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
877 sig->it_real_incr.tv64 = 0;
878 sig->real_timer.function = it_real_fn;
879 sig->tsk = tsk;
881 sig->it_virt_expires = cputime_zero;
882 sig->it_virt_incr = cputime_zero;
883 sig->it_prof_expires = cputime_zero;
884 sig->it_prof_incr = cputime_zero;
886 sig->leader = 0; /* session leadership doesn't inherit */
887 sig->tty_old_pgrp = NULL;
889 sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
890 sig->gtime = cputime_zero;
891 sig->cgtime = cputime_zero;
892 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
893 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
894 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
895 sig->sum_sched_runtime = 0;
896 INIT_LIST_HEAD(&sig->cpu_timers[0]);
897 INIT_LIST_HEAD(&sig->cpu_timers[1]);
898 INIT_LIST_HEAD(&sig->cpu_timers[2]);
899 taskstats_tgid_init(sig);
901 task_lock(current->group_leader);
902 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
903 task_unlock(current->group_leader);
905 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
907 * New sole thread in the process gets an expiry time
908 * of the whole CPU time limit.
910 tsk->it_prof_expires =
911 secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
913 acct_init_pacct(&sig->pacct);
915 tty_audit_fork(sig);
917 return 0;
920 void __cleanup_signal(struct signal_struct *sig)
922 exit_thread_group_keys(sig);
923 kmem_cache_free(signal_cachep, sig);
926 static inline void cleanup_signal(struct task_struct *tsk)
928 struct signal_struct *sig = tsk->signal;
930 atomic_dec(&sig->live);
932 if (atomic_dec_and_test(&sig->count))
933 __cleanup_signal(sig);
936 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
938 unsigned long new_flags = p->flags;
940 new_flags &= ~PF_SUPERPRIV;
941 new_flags |= PF_FORKNOEXEC;
942 if (!(clone_flags & CLONE_PTRACE))
943 p->ptrace = 0;
944 p->flags = new_flags;
947 asmlinkage long sys_set_tid_address(int __user *tidptr)
949 current->clear_child_tid = tidptr;
951 return current->pid;
954 static inline void rt_mutex_init_task(struct task_struct *p)
956 spin_lock_init(&p->pi_lock);
957 #ifdef CONFIG_RT_MUTEXES
958 plist_head_init(&p->pi_waiters, &p->pi_lock);
959 p->pi_blocked_on = NULL;
960 #endif
963 /*
964 * This creates a new process as a copy of the old one,
965 * but does not actually start it yet.
966 *
967 * It copies the registers, and all the appropriate
968 * parts of the process environment (as per the clone
969 * flags). The actual kick-off is left to the caller.
970 */
971 static struct task_struct *copy_process(unsigned long clone_flags,
972 unsigned long stack_start,
973 struct pt_regs *regs,
974 unsigned long stack_size,
975 int __user *parent_tidptr,
976 int __user *child_tidptr,
977 struct pid *pid)
979 int retval;
980 struct task_struct *p = NULL;
982 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
983 return ERR_PTR(-EINVAL);
985 /*
986 * Thread groups must share signals as well, and detached threads
987 * can only be started up within the thread group.
988 */
989 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
990 return ERR_PTR(-EINVAL);
992 /*
993 * Shared signal handlers imply shared VM. By way of the above,
994 * thread groups also imply shared VM. Blocking this case allows
995 * for various simplifications in other code.
996 */
997 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
998 return ERR_PTR(-EINVAL);
1000 retval = security_task_create(clone_flags);
1001 if (retval)
1002 goto fork_out;
1004 retval = -ENOMEM;
1005 p = dup_task_struct(current);
1006 if (!p)
1007 goto fork_out;
1009 rt_mutex_init_task(p);
1011 #ifdef CONFIG_TRACE_IRQFLAGS
1012 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1013 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1014 #endif
1015 retval = -EAGAIN;
1016 if (atomic_read(&p->user->processes) >=
1017 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
1018 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1019 p->user != current->nsproxy->user_ns->root_user)
1020 goto bad_fork_free;
1023 atomic_inc(&p->user->__count);
1024 atomic_inc(&p->user->processes);
1025 get_group_info(p->group_info);
1027 /*
1028 * If multiple threads are within copy_process(), then this check
1029 * triggers too late. This doesn't hurt, the check is only there
1030 * to stop root fork bombs.
1031 */
1032 if (nr_threads >= max_threads)
1033 goto bad_fork_cleanup_count;
1035 if (!try_module_get(task_thread_info(p)->exec_domain->module))
1036 goto bad_fork_cleanup_count;
1038 if (p->binfmt && !try_module_get(p->binfmt->module))
1039 goto bad_fork_cleanup_put_domain;
1041 p->did_exec = 0;
1042 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1043 copy_flags(clone_flags, p);
1044 p->pid = pid_nr(pid);
1045 retval = -EFAULT;
1046 if (clone_flags & CLONE_PARENT_SETTID)
1047 if (put_user(p->pid, parent_tidptr))
1048 goto bad_fork_cleanup_delays_binfmt;
1050 INIT_LIST_HEAD(&p->children);
1051 INIT_LIST_HEAD(&p->sibling);
1052 p->vfork_done = NULL;
1053 spin_lock_init(&p->alloc_lock);
1055 clear_tsk_thread_flag(p, TIF_SIGPENDING);
1056 init_sigpending(&p->pending);
1058 p->utime = cputime_zero;
1059 p->stime = cputime_zero;
1060 p->gtime = cputime_zero;
1062 #ifdef CONFIG_TASK_XACCT
1063 p->rchar = 0; /* I/O counter: bytes read */
1064 p->wchar = 0; /* I/O counter: bytes written */
1065 p->syscr = 0; /* I/O counter: read syscalls */
1066 p->syscw = 0; /* I/O counter: write syscalls */
1067 #endif
1068 task_io_accounting_init(p);
1069 acct_clear_integrals(p);
1071 p->it_virt_expires = cputime_zero;
1072 p->it_prof_expires = cputime_zero;
1073 p->it_sched_expires = 0;
1074 INIT_LIST_HEAD(&p->cpu_timers[0]);
1075 INIT_LIST_HEAD(&p->cpu_timers[1]);
1076 INIT_LIST_HEAD(&p->cpu_timers[2]);
1078 p->lock_depth = -1; /* -1 = no lock */
1079 do_posix_clock_monotonic_gettime(&p->start_time);
1080 p->real_start_time = p->start_time;
1081 monotonic_to_bootbased(&p->real_start_time);
1082 p->security = NULL;
1083 p->io_context = NULL;
1084 p->io_wait = NULL;
1085 p->audit_context = NULL;
1086 cpuset_fork(p);
1087 #ifdef CONFIG_NUMA
1088 p->mempolicy = mpol_copy(p->mempolicy);
1089 if (IS_ERR(p->mempolicy)) {
1090 retval = PTR_ERR(p->mempolicy);
1091 p->mempolicy = NULL;
1092 goto bad_fork_cleanup_cpuset;
1094 mpol_fix_fork_child_flag(p);
1095 #endif
1096 #ifdef CONFIG_TRACE_IRQFLAGS
1097 p->irq_events = 0;
1098 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1099 p->hardirqs_enabled = 1;
1100 #else
1101 p->hardirqs_enabled = 0;
1102 #endif
1103 p->hardirq_enable_ip = 0;
1104 p->hardirq_enable_event = 0;
1105 p->hardirq_disable_ip = _THIS_IP_;
1106 p->hardirq_disable_event = 0;
1107 p->softirqs_enabled = 1;
1108 p->softirq_enable_ip = _THIS_IP_;
1109 p->softirq_enable_event = 0;
1110 p->softirq_disable_ip = 0;
1111 p->softirq_disable_event = 0;
1112 p->hardirq_context = 0;
1113 p->softirq_context = 0;
1114 #endif
1115 #ifdef CONFIG_LOCKDEP
1116 p->lockdep_depth = 0; /* no locks held yet */
1117 p->curr_chain_key = 0;
1118 p->lockdep_recursion = 0;
1119 #endif
1121 #ifdef CONFIG_DEBUG_MUTEXES
1122 p->blocked_on = NULL; /* not blocked yet */
1123 #endif
1125 p->tgid = p->pid;
1126 if (clone_flags & CLONE_THREAD)
1127 p->tgid = current->tgid;
1129 if ((retval = security_task_alloc(p)))
1130 goto bad_fork_cleanup_policy;
1131 if ((retval = audit_alloc(p)))
1132 goto bad_fork_cleanup_security;
1133 /* copy all the process information */
1134 if ((retval = copy_semundo(clone_flags, p)))
1135 goto bad_fork_cleanup_audit;
1136 if ((retval = copy_files(clone_flags, p)))
1137 goto bad_fork_cleanup_semundo;
1138 if ((retval = copy_fs(clone_flags, p)))
1139 goto bad_fork_cleanup_files;
1140 if ((retval = copy_sighand(clone_flags, p)))
1141 goto bad_fork_cleanup_fs;
1142 if ((retval = copy_signal(clone_flags, p)))
1143 goto bad_fork_cleanup_sighand;
1144 if ((retval = copy_mm(clone_flags, p)))
1145 goto bad_fork_cleanup_signal;
1146 if ((retval = copy_keys(clone_flags, p)))
1147 goto bad_fork_cleanup_mm;
1148 if ((retval = copy_namespaces(clone_flags, p)))
1149 goto bad_fork_cleanup_keys;
1150 retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
1151 if (retval)
1152 goto bad_fork_cleanup_namespaces;
1154 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1156 * Clear TID on mm_release()?
1158 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
1159 p->robust_list = NULL;
1160 #ifdef CONFIG_COMPAT
1161 p->compat_robust_list = NULL;
1162 #endif
1163 INIT_LIST_HEAD(&p->pi_state_list);
1164 p->pi_state_cache = NULL;
1167 * sigaltstack should be cleared when sharing the same VM
1169 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1170 p->sas_ss_sp = p->sas_ss_size = 0;
1173 * Syscall tracing should be turned off in the child regardless
1174 * of CLONE_PTRACE.
1176 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1177 #ifdef TIF_SYSCALL_EMU
1178 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1179 #endif
1181 /* Our parent execution domain becomes current domain
1182 These must match for thread signalling to apply */
1183 p->parent_exec_id = p->self_exec_id;
1185 /* ok, now we should be set up.. */
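/*
 * CSIGNAL is the low byte of the clone flags and holds the signal the
 * child should send its parent on exit (SIGCHLD for a plain fork()).
 * CLONE_THREAD children get -1 so that only the thread group leader,
 * not each individual thread, notifies the parent.
 */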
1186 p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1187 p->pdeath_signal = 0;
1188 p->exit_state = 0;
1191 * Ok, make it visible to the rest of the system.
1192 * We dont wake it up yet.
1194 p->group_leader = p;
1195 INIT_LIST_HEAD(&p->thread_group);
1196 INIT_LIST_HEAD(&p->ptrace_children);
1197 INIT_LIST_HEAD(&p->ptrace_list);
1199 /* Perform scheduler related setup. Assign this task to a CPU. */
1200 sched_fork(p, clone_flags);
1202 /* Need tasklist lock for parent etc handling! */
1203 write_lock_irq(&tasklist_lock);
1205 /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
1206 p->ioprio = current->ioprio;
1208 /*
1209 * The task hasn't been attached yet, so its cpus_allowed mask will
1210 * not be changed, nor will its assigned CPU.
1211 *
1212 * The cpus_allowed mask of the parent may have changed after it was
1213 * copied first time - so re-copy it here, then check the child's CPU
1214 * to ensure it is on a valid CPU (and if not, just force it back to
1215 * parent's CPU). This avoids a lot of nasty races.
1216 */
1217 p->cpus_allowed = current->cpus_allowed;
1218 if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
1219 !cpu_online(task_cpu(p))))
1220 set_task_cpu(p, smp_processor_id());
1222 /* CLONE_PARENT re-uses the old parent */
1223 if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
1224 p->real_parent = current->real_parent;
1225 else
1226 p->real_parent = current;
1227 p->parent = p->real_parent;
1229 spin_lock(&current->sighand->siglock);
1231 /*
1232 * Process group and session signals need to be delivered to just the
1233 * parent before the fork or both the parent and the child after the
1234 * fork. Restart if a signal comes in before we add the new process to
1235 * its process group.
1236 * A fatal signal pending means that current will exit, so the new
1237 * thread can't slip out of an OOM kill (or normal SIGKILL).
1238 */
1239 recalc_sigpending();
1240 if (signal_pending(current)) {
1241 spin_unlock(&current->sighand->siglock);
1242 write_unlock_irq(&tasklist_lock);
1243 retval = -ERESTARTNOINTR;
1244 goto bad_fork_cleanup_namespaces;
1247 if (clone_flags & CLONE_THREAD) {
1248 p->group_leader = current->group_leader;
1249 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1251 if (!cputime_eq(current->signal->it_virt_expires,
1252 cputime_zero) ||
1253 !cputime_eq(current->signal->it_prof_expires,
1254 cputime_zero) ||
1255 current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
1256 !list_empty(&current->signal->cpu_timers[0]) ||
1257 !list_empty(&current->signal->cpu_timers[1]) ||
1258 !list_empty(&current->signal->cpu_timers[2])) {
1260 * Have child wake up on its first tick to check
1261 * for process CPU timers.
1263 p->it_prof_expires = jiffies_to_cputime(1);
1267 if (likely(p->pid)) {
1268 add_parent(p);
1269 if (unlikely(p->ptrace & PT_PTRACED))
1270 __ptrace_link(p, current->parent);
1272 if (thread_group_leader(p)) {
1273 p->signal->tty = current->signal->tty;
1274 p->signal->pgrp = process_group(current);
1275 set_signal_session(p->signal, process_session(current));
1276 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1277 attach_pid(p, PIDTYPE_SID, task_session(current));
1279 list_add_tail_rcu(&p->tasks, &init_task.tasks);
1280 __get_cpu_var(process_counts)++;
1282 attach_pid(p, PIDTYPE_PID, pid);
1283 nr_threads++;
1286 total_forks++;
1287 spin_unlock(&current->sighand->siglock);
1288 write_unlock_irq(&tasklist_lock);
1289 proc_fork_connector(p);
1290 return p;
1292 bad_fork_cleanup_namespaces:
1293 exit_task_namespaces(p);
1294 bad_fork_cleanup_keys:
1295 exit_keys(p);
1296 bad_fork_cleanup_mm:
1297 if (p->mm)
1298 mmput(p->mm);
1299 bad_fork_cleanup_signal:
1300 cleanup_signal(p);
1301 bad_fork_cleanup_sighand:
1302 __cleanup_sighand(p->sighand);
1303 bad_fork_cleanup_fs:
1304 exit_fs(p); /* blocking */
1305 bad_fork_cleanup_files:
1306 exit_files(p); /* blocking */
1307 bad_fork_cleanup_semundo:
1308 exit_sem(p);
1309 bad_fork_cleanup_audit:
1310 audit_free(p);
1311 bad_fork_cleanup_security:
1312 security_task_free(p);
1313 bad_fork_cleanup_policy:
1314 #ifdef CONFIG_NUMA
1315 mpol_free(p->mempolicy);
1316 bad_fork_cleanup_cpuset:
1317 #endif
1318 cpuset_exit(p);
1319 bad_fork_cleanup_delays_binfmt:
1320 delayacct_tsk_free(p);
1321 if (p->binfmt)
1322 module_put(p->binfmt->module);
1323 bad_fork_cleanup_put_domain:
1324 module_put(task_thread_info(p)->exec_domain->module);
1325 bad_fork_cleanup_count:
1326 put_group_info(p->group_info);
1327 atomic_dec(&p->user->processes);
1328 free_uid(p->user);
1329 bad_fork_free:
1330 free_task(p);
1331 fork_out:
1332 return ERR_PTR(retval);
1335 noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1337 memset(regs, 0, sizeof(struct pt_regs));
1338 return regs;
1341 struct task_struct * __cpuinit fork_idle(int cpu)
1343 struct task_struct *task;
1344 struct pt_regs regs;
1346 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
1347 &init_struct_pid);
1348 if (!IS_ERR(task))
1349 init_idle(task, cpu);
1351 return task;
1354 static inline int fork_traceflag (unsigned clone_flags)
1356 if (clone_flags & CLONE_UNTRACED)
1357 return 0;
1358 else if (clone_flags & CLONE_VFORK) {
1359 if (current->ptrace & PT_TRACE_VFORK)
1360 return PTRACE_EVENT_VFORK;
1361 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1362 if (current->ptrace & PT_TRACE_CLONE)
1363 return PTRACE_EVENT_CLONE;
1364 } else if (current->ptrace & PT_TRACE_FORK)
1365 return PTRACE_EVENT_FORK;
1367 return 0;
1370 /*
1371 * Ok, this is the main fork-routine.
1372 *
1373 * It copies the process, and if successful kick-starts
1374 * it and waits for it to finish using the VM if required.
1375 */
1376 long do_fork(unsigned long clone_flags,
1377 unsigned long stack_start,
1378 struct pt_regs *regs,
1379 unsigned long stack_size,
1380 int __user *parent_tidptr,
1381 int __user *child_tidptr)
1383 struct task_struct *p;
1384 int trace = 0;
1385 struct pid *pid = alloc_pid();
1386 long nr;
1388 if (!pid)
1389 return -EAGAIN;
1390 nr = pid->nr;
1391 if (unlikely(current->ptrace)) {
1392 trace = fork_traceflag (clone_flags);
1393 if (trace)
1394 clone_flags |= CLONE_PTRACE;
1397 p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
1398 /*
1399 * Do this prior to waking up the new thread - the thread pointer
1400 * might get invalid after that point, if the thread exits quickly.
1401 */
1402 if (!IS_ERR(p)) {
1403 struct completion vfork;
1405 if (clone_flags & CLONE_VFORK) {
1406 p->vfork_done = &vfork;
1407 init_completion(&vfork);
1410 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
1412 * We'll start up with an immediate SIGSTOP.
1414 sigaddset(&p->pending.signal, SIGSTOP);
1415 set_tsk_thread_flag(p, TIF_SIGPENDING);
1418 if (!(clone_flags & CLONE_STOPPED))
1419 wake_up_new_task(p, clone_flags);
1420 else
1421 p->state = TASK_STOPPED;
1423 if (unlikely (trace)) {
1424 current->ptrace_message = nr;
1425 ptrace_notify ((trace << 8) | SIGTRAP);
1428 if (clone_flags & CLONE_VFORK) {
1429 freezer_do_not_count();
1430 wait_for_completion(&vfork);
1431 freezer_count();
1432 if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
1433 current->ptrace_message = nr;
1434 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1437 } else {
1438 free_pid(pid);
1439 nr = PTR_ERR(p);
1441 return nr;
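/*
 * For reference, this is roughly how the i386 syscall wrappers invoke
 * do_fork() (see the arch code for the real definitions; treat this as
 * an illustrative sketch, not a copy of that file):
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *	}
 *
 *	asmlinkage int sys_vfork(struct pt_regs regs)
 *	{
 *		return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp,
 *			       &regs, 0, NULL, NULL);
 *	}
 */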
1444 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1445 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1446 #endif
1448 static void sighand_ctor(struct kmem_cache *cachep, void *data)
1450 struct sighand_struct *sighand = data;
1452 spin_lock_init(&sighand->siglock);
1453 init_waitqueue_head(&sighand->signalfd_wqh);
1456 void __init proc_caches_init(void)
1458 sighand_cachep = kmem_cache_create("sighand_cache",
1459 sizeof(struct sighand_struct), 0,
1460 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
1461 sighand_ctor);
1462 signal_cachep = kmem_cache_create("signal_cache",
1463 sizeof(struct signal_struct), 0,
1464 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1465 files_cachep = kmem_cache_create("files_cache",
1466 sizeof(struct files_struct), 0,
1467 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1468 fs_cachep = kmem_cache_create("fs_cache",
1469 sizeof(struct fs_struct), 0,
1470 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1471 vm_area_cachep = kmem_cache_create("vm_area_struct",
1472 sizeof(struct vm_area_struct), 0,
1473 SLAB_PANIC, NULL);
1474 mm_cachep = kmem_cache_create("mm_struct",
1475 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1476 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1479 /*
1480 * Check constraints on flags passed to the unshare system call and
1481 * force unsharing of additional process context as appropriate.
1482 */
1483 static inline void check_unshare_flags(unsigned long *flags_ptr)
1486 * If unsharing a thread from a thread group, must also
1487 * unshare vm.
1489 if (*flags_ptr & CLONE_THREAD)
1490 *flags_ptr |= CLONE_VM;
1493 * If unsharing vm, must also unshare signal handlers.
1495 if (*flags_ptr & CLONE_VM)
1496 *flags_ptr |= CLONE_SIGHAND;
1499 * If unsharing signal handlers and the task was created
1500 * using CLONE_THREAD, then must unshare the thread
1502 if ((*flags_ptr & CLONE_SIGHAND) &&
1503 (atomic_read(&current->signal->count) > 1))
1504 *flags_ptr |= CLONE_THREAD;
1507 * If unsharing namespace, must also unshare filesystem information.
1509 if (*flags_ptr & CLONE_NEWNS)
1510 *flags_ptr |= CLONE_FS;
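/*
 * Example of the forcing above: a bare unshare(CLONE_NEWNS) is upgraded
 * to CLONE_NEWNS | CLONE_FS, so the caller's root and cwd (fs_struct)
 * stop being shared along with the mount namespace.
 */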
1514 * Unsharing of tasks created with CLONE_THREAD is not supported yet
1516 static int unshare_thread(unsigned long unshare_flags)
1518 if (unshare_flags & CLONE_THREAD)
1519 return -EINVAL;
1521 return 0;
1525 * Unshare the filesystem structure if it is being shared
1527 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1529 struct fs_struct *fs = current->fs;
1531 if ((unshare_flags & CLONE_FS) &&
1532 (fs && atomic_read(&fs->count) > 1)) {
1533 *new_fsp = __copy_fs_struct(current->fs);
1534 if (!*new_fsp)
1535 return -ENOMEM;
1538 return 0;
1542 * Unsharing of sighand is not supported yet
1544 static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
1546 struct sighand_struct *sigh = current->sighand;
1548 if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
1549 return -EINVAL;
1550 else
1551 return 0;
1555 * Unshare vm if it is being shared
1557 static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
1559 struct mm_struct *mm = current->mm;
1561 if ((unshare_flags & CLONE_VM) &&
1562 (mm && atomic_read(&mm->mm_users) > 1)) {
1563 return -EINVAL;
1566 return 0;
1570 * Unshare file descriptor table if it is being shared
1572 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1574 struct files_struct *fd = current->files;
1575 int error = 0;
1577 if ((unshare_flags & CLONE_FILES) &&
1578 (fd && atomic_read(&fd->count) > 1)) {
1579 *new_fdp = dup_fd(fd, &error);
1580 if (!*new_fdp)
1581 return error;
1584 return 0;
1588 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
1589 * supported yet
1591 static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
1593 if (unshare_flags & CLONE_SYSVSEM)
1594 return -EINVAL;
1596 return 0;
1599 /*
1600 * unshare allows a process to 'unshare' part of the process
1601 * context which was originally shared using clone. copy_*
1602 * functions used by do_fork() cannot be used here directly
1603 * because they modify an inactive task_struct that is being
1604 * constructed. Here we are modifying the current, active,
1605 * task_struct.
1606 */
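/*
 * Userspace sketch (illustrative only, not part of this file):
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare");
 *
 * after which mounts and unmounts performed by this process are no
 * longer shared with the namespace it was created in (subject to
 * mount propagation).
 */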
1607 asmlinkage long sys_unshare(unsigned long unshare_flags)
1609 int err = 0;
1610 struct fs_struct *fs, *new_fs = NULL;
1611 struct sighand_struct *new_sigh = NULL;
1612 struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
1613 struct files_struct *fd, *new_fd = NULL;
1614 struct sem_undo_list *new_ulist = NULL;
1615 struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
1617 check_unshare_flags(&unshare_flags);
1619 /* Return -EINVAL for all unsupported flags */
1620 err = -EINVAL;
1621 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1622 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1623 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
1624 CLONE_NEWNET))
1625 goto bad_unshare_out;
1627 if ((err = unshare_thread(unshare_flags)))
1628 goto bad_unshare_out;
1629 if ((err = unshare_fs(unshare_flags, &new_fs)))
1630 goto bad_unshare_cleanup_thread;
1631 if ((err = unshare_sighand(unshare_flags, &new_sigh)))
1632 goto bad_unshare_cleanup_fs;
1633 if ((err = unshare_vm(unshare_flags, &new_mm)))
1634 goto bad_unshare_cleanup_sigh;
1635 if ((err = unshare_fd(unshare_flags, &new_fd)))
1636 goto bad_unshare_cleanup_vm;
1637 if ((err = unshare_semundo(unshare_flags, &new_ulist)))
1638 goto bad_unshare_cleanup_fd;
1639 if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1640 new_fs)))
1641 goto bad_unshare_cleanup_semundo;
1643 if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {
1645 task_lock(current);
1647 if (new_nsproxy) {
1648 old_nsproxy = current->nsproxy;
1649 current->nsproxy = new_nsproxy;
1650 new_nsproxy = old_nsproxy;
1653 if (new_fs) {
1654 fs = current->fs;
1655 current->fs = new_fs;
1656 new_fs = fs;
1659 if (new_mm) {
1660 mm = current->mm;
1661 active_mm = current->active_mm;
1662 current->mm = new_mm;
1663 current->active_mm = new_mm;
1664 activate_mm(active_mm, new_mm);
1665 new_mm = mm;
1668 if (new_fd) {
1669 fd = current->files;
1670 current->files = new_fd;
1671 new_fd = fd;
1674 task_unlock(current);
1677 if (new_nsproxy)
1678 put_nsproxy(new_nsproxy);
1680 bad_unshare_cleanup_semundo:
1681 bad_unshare_cleanup_fd:
1682 if (new_fd)
1683 put_files_struct(new_fd);
1685 bad_unshare_cleanup_vm:
1686 if (new_mm)
1687 mmput(new_mm);
1689 bad_unshare_cleanup_sigh:
1690 if (new_sigh)
1691 if (atomic_dec_and_test(&new_sigh->count))
1692 kmem_cache_free(sighand_cachep, new_sigh);
1694 bad_unshare_cleanup_fs:
1695 if (new_fs)
1696 put_fs_struct(new_fs);
1698 bad_unshare_cleanup_thread:
1699 bad_unshare_out:
1700 return err;