// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sched/ext.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <linux/memblock.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>
#include <linux/tick.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

#include <kunit/visibility.h>
/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */
#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};
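/*
 * Illustrative note (not from the original source): NAMED_ARRAY_INDEX() is a
 * designated initializer that keys each entry by its enum value and stores
 * the name as a string, e.g. NAMED_ARRAY_INDEX(MM_FILEPAGES) expands to
 *
 *	[MM_FILEPAGES] = "MM_FILEPAGES",
 *
 * which is what lets check_mm() below print a counter's name from its index.
 */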
DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

# ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *tmp = NULL;

		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
			return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}
static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;
	int nr_charged = 0;

	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
		nr_charged++;
	}
	return 0;
err:
	for (i = 0; i < nr_charged; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);
		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}
# else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

# endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
#ifdef CONFIG_PER_VMA_LOCK

/* SLAB cache for vm_area_struct.lock */
static struct kmem_cache *vma_lock_cachep;

static bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vma_lock_cachep, vma->vm_lock);
}

#else /* CONFIG_PER_VMA_LOCK */

static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
static inline void vma_lock_free(struct vm_area_struct *vma) {}

#endif /* CONFIG_PER_VMA_LOCK */
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return NULL;

	if (!vma_lock_alloc(vma)) {
		kmem_cache_free(vm_area_cachep, vma);
		return NULL;
	}

	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (!new)
		return NULL;

	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
	/*
	 * orig->shared.rb may be modified concurrently, but the clone
	 * will be reinitialized.
	 */
	data_race(memcpy(new, orig, sizeof(*new)));
	if (!vma_lock_alloc(new)) {
		kmem_cache_free(vm_area_cachep, new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);
	vma_numab_state_init(new);
	dup_anon_vma_name(orig, new);

	return new;
}

void __vm_area_free(struct vm_area_struct *vma)
{
	vma_numab_state_free(vma);
	free_anon_vma_name(vma);
	vma_lock_free(vma);
	kmem_cache_free(vm_area_cachep, vma);
}
#ifdef CONFIG_PER_VMA_LOCK
static void vm_area_free_rcu_cb(struct rcu_head *head)
{
	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
						  vm_rcu);

	/* The vma should not be locked while being destroyed. */
	VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
	__vm_area_free(vma);
}
#endif

void vm_area_free(struct vm_area_struct *vma)
{
#ifdef CONFIG_PER_VMA_LOCK
	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
#else
	__vm_area_free(vma);
#endif
}
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}
void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}
static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	bpf_task_storage_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	/* Use __mt_dup() to efficiently build an identical maple tree. */
	retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
	if (unlikely(retval))
		goto out;

	mt_clear_in_rcu(vmi.mas.tree);
	for_each_vma(vmi, mpnt) {
		struct file *file;

		vma_start_write(mpnt);
		if (mpnt->vm_flags & VM_DONTCOPY) {
			retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
						    mpnt->vm_end, GFP_KERNEL);
			if (retval)
				goto loop_out;

			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/*
		 * Link the vma into the MT. After using __mt_dup(), memory
		 * allocation is not necessary here, so it cannot fail.
		 */
		vma_iter_bulk_store(&vmi, tmp);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			i_mmap_lock_write(mapping);
			if (vma_is_shared_maywrite(tmp))
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
						       &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (retval) {
			mpnt = vma_next(&vmi);
			goto loop_out;
		}
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	if (!retval) {
		mt_set_in_rcu(vmi.mas.tree);
		khugepaged_fork(mm, oldmm);
	} else {
		/*
		 * The entire maple tree has already been duplicated. If the
		 * mmap duplication fails, mark the failure point with
		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
		 * stop releasing VMAs that have not been duplicated after this
		 * point.
		 */
		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
		mas_store(&vmi.mas, XA_ZERO_ENTRY);
	}
out:
	mmap_write_unlock(mm);
	mmap_write_unlock(oldmm);
	if (!retval)
		dup_userfaultfd_complete(&uf);
	else
		dup_userfaultfd_fail(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
static void do_check_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	WARN_ON_ONCE(current->active_mm == mm);
}

static void do_shoot_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		WARN_ON_ONCE(current->mm);
		current->active_mm = &init_mm;
		switch_mm(mm, &init_mm, current);
	}
}

static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
		/*
		 * In this case, lazy tlb mms are refcounted and would not reach
		 * __mmdrop until all CPUs have switched away and mmdrop()ed.
		 */
		return;
	}

	/*
	 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
	 * requires lazy mm users to switch to another mm when the refcount
	 * drops to zero, before the mm is freed. This requires IPIs here to
	 * switch kernel threads to init_mm.
	 *
	 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
	 * switch with the final userspace teardown TLB flush which leaves the
	 * mm lazy on this CPU but no others, reducing the need for additional
	 * IPIs here. There are cases where a final IPI is still required here,
	 * such as the final mmdrop being performed on a different CPU than the
	 * one exiting, or kernel threads using the mm when userspace exits.
	 *
	 * IPI overheads have not been found to be expensive, but they could be
	 * reduced in a number of possible ways, for example (roughly
	 * increasing order of complexity):
	 * - The last lazy reference created by exit_mm() could instead switch
	 *   to init_mm, however it's probable this will run on the same CPU
	 *   immediately afterwards, so this may not reduce IPIs much.
	 * - A batch of mms requiring IPIs could be gathered and freed at once.
	 * - CPUs store active_mm where it can be remotely checked without a
	 *   lock, to filter out false-positives in the cpumask.
	 * - After mm_users or mm_count reaches zero, switching away from the
	 *   mm could clear mm_cpumask to reduce some IPIs, perhaps together
	 *   with some batching or delaying of the final IPIs.
	 * - A delayed freeing and RCU-like quiescing sequence based on mm
	 *   switching to avoid IPIs completely.
	 */
	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
	if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
		on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
}
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);

	/* Ensure no CPUs are using this as their lazy tlb mm */
	cleanup_lazy_tlbs(mm);

	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	mmu_notifier_subscriptions_destroy(mm);
	put_user_ns(mm->user_ns);
	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	task_numa_free(tsk, true);
	security_task_free(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __put_task_struct_rcu_cb(struct rcu_head *rhp)
{
	struct task_struct *task = container_of(rhp, struct task_struct, rcu);

	__put_task_struct(task);
}
EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
void __init __weak arch_task_cache_init(void) { }

static void __init set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = memblock_estimated_nr_free_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
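/*
 * Rough worked example (illustrative, not from the original source): with
 * 16 GiB of free memory (nr_pages * PAGE_SIZE = 2^34) and a 16 KiB
 * THREAD_SIZE, the division above gives
 *
 *	threads = 2^34 / (2^14 * 8) = 2^17 = 131072
 *
 * i.e. thread structures/stacks are limited to roughly 1/8th of memory, and
 * the result is then clamped to the [MIN_THREADS, MAX_THREADS] range.
 */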
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
void __init fork_init(void)
{
	int i;
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}
int __weak arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_tsk;

	err = alloc_thread_stack_node(tsk, node);
	if (err)
		goto free_tsk;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif
	account_kernel_stack(tsk, 1);

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->worker_private = NULL;

	kcov_task_init(tsk);
	kmsan_task_create(tsk);
	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_disk = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_ARCH_HAS_CPU_PASID
	tsk->pasid_activated = 0;
#endif

	tsk->active_memcg = NULL;

#ifdef CONFIG_X86_BUS_LOCK_DETECT
	tsk->reported_split_lock = 0;
#endif

#ifdef CONFIG_SCHED_MM_CID
	tsk->mm_cid = -1;
	tsk->last_mm_cid = -1;
	tsk->mm_cid_active = 0;
	tsk->migrate_from_cpu = -1;
#endif
	return tsk;

free_stack:
	exit_task_stack_account(tsk);
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
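/*
 * Example (illustrative, not from the original source): booting with
 * "coredump_filter=0x23" seeds default_dump_filter so that every new mm
 * starts with bits 0, 1 and 5 set, i.e. anonymous private, anonymous shared
 * and hugetlb private mappings are included in core dumps; the mask can
 * still be changed per process later through /proc/<pid>/coredump_filter.
 */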
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	INIT_LIST_HEAD(&mm->mmlist);
#ifdef CONFIG_PER_VMA_LOCK
	mm->mm_lock_seq = 0;
#endif
	mm_pgtables_bytes_init(mm);
	atomic64_set(&mm->pinned_vm, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_subscriptions_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);
	hugetlb_count_init(mm);

	if (current->mm) {
		mm->flags = mmf_init_flags(current->mm->flags);
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	if (mm_alloc_cid(mm, p))
		goto fail_cid;

	if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
				     NR_MM_COUNTERS))
		goto fail_pcpu;

	mm->user_ns = get_user_ns(user_ns);
	lru_gen_init_mm(mm);
	return mm;

fail_pcpu:
	mm_destroy_cid(mm);
fail_cid:
	destroy_context(mm);
fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}
EXPORT_SYMBOL_IF_KUNIT(mm_alloc);
static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	mm_put_huge_zero_folio(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
EXPORT_SYMBOL_GPL(mmput_async);
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve it happens before
 * the new mm is made visible to anyone.
 *
 * Can only fail if new_exe_file != NULL.
 */
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file) {
		/*
		 * We expect the caller (i.e., sys_execve) to have already
		 * denied write access, so this is unlikely to fail.
		 */
		if (unlikely(deny_write_access(new_exe_file)))
			return -EACCES;
		get_file(new_exe_file);
	}
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}
/**
 * replace_mm_exe_file - replace a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
 */
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct vm_area_struct *vma;
	struct file *old_exe_file;
	int ret = 0;

	/* Forbid mm->exe_file change if old file still mapped. */
	old_exe_file = get_mm_exe_file(mm);
	if (old_exe_file) {
		VMA_ITERATOR(vmi, mm, 0);

		mmap_read_lock(mm);
		for_each_vma(vmi, vma) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &old_exe_file->f_path)) {
				ret = -EBUSY;
				break;
			}
		}
		mmap_read_unlock(mm);
		fput(old_exe_file);
		if (ret)
			return ret;
	}

	ret = deny_write_access(new_exe_file);
	if (ret)
		return -EACCES;
	get_file(new_exe_file);

	/* set the new file */
	mmap_write_lock(mm);
	old_exe_file = rcu_dereference_raw(mm->exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	mmap_write_unlock(mm);

	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 * @mm: The mm of interest.
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = get_file_rcu(&mm->exe_file);
	rcu_read_unlock();
	return exe_file;
}
/**
 * get_task_exe_file - acquire a reference to the task's executable file
 * @task: The task.
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
/**
 * get_task_mm - acquire a reference to the task's mm
 * @task: The task.
 *
 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count. User must release the mm via mmput()
 * after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	if (task->flags & PF_KTHREAD)
		return NULL;

	task_lock(task);
	mm = task->mm;
	if (mm)
		mmget(mm);
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
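/*
 * Typical caller pattern (illustrative sketch, not part of the original
 * file): pin the mm, take the mmap lock while inspecting it, then drop both,
 * exactly as the kernel-doc above requires:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		... walk VMAs, read statistics, etc. ...
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 */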
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (!mm) {
		mm = ERR_PTR(-ESRCH);
	} else if (mm != current->mm && !ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	up_read(&task->signal->exec_update_lock);

	return mm;
}
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	unsigned int state = TASK_KILLABLE|TASK_FREEZABLE;
	int killed;

	cgroup_enter_frozen();
	killed = wait_for_completion_state(vfork, state);
	cgroup_leave_frozen(false);

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exit_release(tsk);
	mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exec_release(tsk);
	mm_release(tsk, mm);
}
/**
 * dup_mm() - duplicates an existing mm structure
 * @tsk: the task_struct with which the new mm will be associated.
 * @oldmm: the mm to duplicate.
 *
 * Allocates a new mm structure and duplicates the provided @oldmm structure
 * content into it.
 *
 * Return: the duplicated mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk,
				struct mm_struct *oldmm)
{
	struct mm_struct *mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mm_init_owner(mm, NULL);
	mmput(mm);

fail_nomem:
	return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
	tsk->last_switch_time = 0;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
	} else {
		mm = dup_mm(tsk, current->mm);
		if (!mm)
			return -ENOMEM;
	}

	tsk->mm = mm;
	tsk->active_mm = mm;
	sched_mm_cid_fork(tsk);
	return 0;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		/* "users" and "in_exec" locked for check_unsafe_exec() */
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
		      int no_files)
{
	struct files_struct *oldf, *newf;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		return 0;

	if (no_files) {
		tsk->files = NULL;
		return 0;
	}

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		return 0;
	}

	newf = dup_fd(oldf, NULL);
	if (IS_ERR(newf))
		return PTR_ERR(newf);

	tsk->files = newf;
	return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		refcount_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	RCU_INIT_POINTER(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	refcount_set(&sig->count, 1);
	spin_lock_irq(&current->sighand->siglock);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	spin_unlock_irq(&current->sighand->siglock);

	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);

	return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (refcount_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	posix_cputimers_group_init(pct, cpu_limit);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	sig->quick_threads = 1;
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_HLIST_HEAD(&sig->multiprocess);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_HLIST_HEAD(&sig->posix_timers);
	INIT_HLIST_HEAD(&sig->ignored_posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);
	init_rwsem(&sig->exec_update_lock);

	return 0;
}
static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before we held the sighand lock, we have
	 * to manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_task_syscall_work(p, SECCOMP);
#endif
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}
static inline void init_task_pid_links(struct task_struct *task)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_NODE(&task->pid_links[type]);
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	if (type == PIDTYPE_PID)
		task->thread_pid = pid;
	else
		task->signal->pids[type] = pid;
}
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
	INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	p->trc_reader_nesting = 0;
	p->trc_reader_special.s = 0;
	INIT_LIST_HEAD(&p->trc_holdout_list);
	INIT_LIST_HEAD(&p->trc_blkd_node);
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
/**
 * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
 * @pid:   the struct pid for which to create a pidfd
 * @flags: flags of the new @pidfd
 * @ret: Where to return the file for the pidfd.
 *
 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
 * caller's file descriptor table. The pidfd is reserved but not installed yet.
 *
 * The helper doesn't perform checks on @pid which makes it useful for pidfds
 * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
 * pidfd file are prepared.
 *
 * If this function returns successfully the caller is responsible to either
 * call fd_install() passing the returned pidfd and pidfd file as arguments in
 * order to install the pidfd into its file descriptor table or they must use
 * put_unused_fd() and fput() on the returned pidfd and pidfd file
 * respectively.
 *
 * This function is useful when a pidfd must already be reserved but there
 * might still be points of failure afterwards and the caller wants to ensure
 * that no pidfd is leaked into its file descriptor table.
 *
 * Return: On success, a reserved pidfd is returned from the function and a new
 *         pidfd file is returned in the last argument to the function. On
 *         error, a negative error code is returned from the function and the
 *         last argument remains unchanged.
 */
static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = get_unused_fd_flags(O_CLOEXEC);
	if (pidfd < 0)
		return pidfd;

	pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR);
	if (IS_ERR(pidfd_file)) {
		put_unused_fd(pidfd);
		return PTR_ERR(pidfd_file);
	}
	/*
	 * anon_inode_getfile() ignores everything outside of the
	 * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually.
	 */
	pidfd_file->f_flags |= (flags & PIDFD_THREAD);
	*ret = pidfd_file;
	return pidfd;
}
/**
 * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
 * @pid:   the struct pid for which to create a pidfd
 * @flags: flags of the new @pidfd
 * @ret: Where to return the pidfd.
 *
 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
 * caller's file descriptor table. The pidfd is reserved but not installed yet.
 *
 * The helper verifies that @pid is still in use, without PIDFD_THREAD the
 * task identified by @pid must be a thread-group leader.
 *
 * If this function returns successfully the caller is responsible to either
 * call fd_install() passing the returned pidfd and pidfd file as arguments in
 * order to install the pidfd into its file descriptor table or they must use
 * put_unused_fd() and fput() on the returned pidfd and pidfd file
 * respectively.
 *
 * This function is useful when a pidfd must already be reserved but there
 * might still be points of failure afterwards and the caller wants to ensure
 * that no pidfd is leaked into its file descriptor table.
 *
 * Return: On success, a reserved pidfd is returned from the function and a new
 *         pidfd file is returned in the last argument to the function. On
 *         error, a negative error code is returned from the function and the
 *         last argument remains unchanged.
 */
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
	bool thread = flags & PIDFD_THREAD;

	if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
		return -EINVAL;

	return __pidfd_prepare(pid, flags, ret);
}
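/*
 * Illustrative sketch (not part of the original source) of the contract the
 * kernel-doc above describes, from a hypothetical caller's point of view:
 *
 *	struct file *pidfd_file;
 *	int pidfd = pidfd_prepare(pid, 0, &pidfd_file);
 *
 *	if (pidfd < 0)
 *		return pidfd;
 *	if (later_setup_step_failed) {
 *		put_unused_fd(pidfd);
 *		fput(pidfd_file);
 *		return -ESOMEERROR;
 *	}
 *	fd_install(pidfd, pidfd_file);
 *	return pidfd;
 *
 * "later_setup_step_failed" and -ESOMEERROR are placeholders; the real
 * in-tree user of this reserve-then-install pattern is copy_process() below.
 */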
static void __delayed_free_task(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	free_task(tsk);
}

static __always_inline void delayed_free_task(struct task_struct *tsk)
{
	if (IS_ENABLED(CONFIG_MEMCG))
		call_rcu(&tsk->rcu, __delayed_free_task);
	else
		free_task(tsk);
}
static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
{
	/* Skip if kernel thread */
	if (!tsk->mm)
		return;

	/* Skip if spawning a thread or using vfork */
	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
		return;

	/* We need to synchronize with __set_oom_adj */
	mutex_lock(&oom_adj_mutex);
	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
	/* Update the values in case they were changed after copy_signal */
	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
	mutex_unlock(&oom_adj_mutex);
}
#ifdef CONFIG_RV
static void rv_task_fork(struct task_struct *p)
{
	int i;

	for (i = 0; i < RV_PER_TASK_MONITORS; i++)
		p->rv[i].da_mon.monitoring = false;
}
#else
#define rv_task_fork(p) do {} while (0)
#endif
2132 * This creates a new process as a copy of the old one,
2133 * but does not actually start it yet.
2135 * It copies the registers, and all the appropriate
2136 * parts of the process environment (as per the clone
2137 * flags). The actual kick-off is left to the caller.
2139 __latent_entropy
struct task_struct
*copy_process(
2143 struct kernel_clone_args
*args
)
2145 int pidfd
= -1, retval
;
2146 struct task_struct
*p
;
2147 struct multiprocess_signals delayed
;
2148 struct file
*pidfile
= NULL
;
2149 const u64 clone_flags
= args
->flags
;
2150 struct nsproxy
*nsp
= current
->nsproxy
;
2153 * Don't allow sharing the root directory with processes in a different
2156 if ((clone_flags
& (CLONE_NEWNS
|CLONE_FS
)) == (CLONE_NEWNS
|CLONE_FS
))
2157 return ERR_PTR(-EINVAL
);
2159 if ((clone_flags
& (CLONE_NEWUSER
|CLONE_FS
)) == (CLONE_NEWUSER
|CLONE_FS
))
2160 return ERR_PTR(-EINVAL
);
2163 * Thread groups must share signals as well, and detached threads
2164 * can only be started up within the thread group.
2166 if ((clone_flags
& CLONE_THREAD
) && !(clone_flags
& CLONE_SIGHAND
))
2167 return ERR_PTR(-EINVAL
);
2170 * Shared signal handlers imply shared VM. By way of the above,
2171 * thread groups also imply shared VM. Blocking this case allows
2172 * for various simplifications in other code.
2174 if ((clone_flags
& CLONE_SIGHAND
) && !(clone_flags
& CLONE_VM
))
2175 return ERR_PTR(-EINVAL
);
2178 * Siblings of global init remain as zombies on exit since they are
2179 * not reaped by their parent (swapper). To solve this and to avoid
2180 * multi-rooted process trees, prevent global and container-inits
2181 * from creating siblings.
2183 if ((clone_flags
& CLONE_PARENT
) &&
2184 current
->signal
->flags
& SIGNAL_UNKILLABLE
)
2185 return ERR_PTR(-EINVAL
);
2188 * If the new process will be in a different pid or user namespace
2189 * do not allow it to share a thread group with the forking task.
2191 if (clone_flags
& CLONE_THREAD
) {
2192 if ((clone_flags
& (CLONE_NEWUSER
| CLONE_NEWPID
)) ||
2193 (task_active_pid_ns(current
) != nsp
->pid_ns_for_children
))
2194 return ERR_PTR(-EINVAL
);
2197 if (clone_flags
& CLONE_PIDFD
) {
2199 * - CLONE_DETACHED is blocked so that we can potentially
2200 * reuse it later for CLONE_PIDFD.
2202 if (clone_flags
& CLONE_DETACHED
)
2203 return ERR_PTR(-EINVAL
);
2207 * Force any signals received before this point to be delivered
2208 * before the fork happens. Collect up signals sent to multiple
2209 * processes that happen during the fork and delay them so that
2210 * they appear to happen after the fork.
2212 sigemptyset(&delayed
.signal
);
2213 INIT_HLIST_NODE(&delayed
.node
);
2215 spin_lock_irq(¤t
->sighand
->siglock
);
2216 if (!(clone_flags
& CLONE_THREAD
))
2217 hlist_add_head(&delayed
.node
, ¤t
->signal
->multiprocess
);
2218 recalc_sigpending();
2219 spin_unlock_irq(¤t
->sighand
->siglock
);
2220 retval
= -ERESTARTNOINTR
;
2221 if (task_sigpending(current
))
2225 p
= dup_task_struct(current
, node
);
2228 p
->flags
&= ~PF_KTHREAD
;
2230 p
->flags
|= PF_KTHREAD
;
2231 if (args
->user_worker
) {
2233 * Mark us a user worker, and block any signal that isn't
2236 p
->flags
|= PF_USER_WORKER
;
2237 siginitsetinv(&p
->blocked
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2239 if (args
->io_thread
)
2240 p
->flags
|= PF_IO_WORKER
;
2243 strscpy_pad(p
->comm
, args
->name
, sizeof(p
->comm
));
2245 p
->set_child_tid
= (clone_flags
& CLONE_CHILD_SETTID
) ? args
->child_tid
: NULL
;
2247 * Clear TID on mm_release()?
2249 p
->clear_child_tid
= (clone_flags
& CLONE_CHILD_CLEARTID
) ? args
->child_tid
: NULL
;
2251 ftrace_graph_init_task(p
);
2253 rt_mutex_init_task(p
);
2255 lockdep_assert_irqs_enabled();
2256 #ifdef CONFIG_PROVE_LOCKING
2257 DEBUG_LOCKS_WARN_ON(!p
->softirqs_enabled
);
2259 retval
= copy_creds(p
, clone_flags
);
2264 if (is_rlimit_overlimit(task_ucounts(p
), UCOUNT_RLIMIT_NPROC
, rlimit(RLIMIT_NPROC
))) {
2265 if (p
->real_cred
->user
!= INIT_USER
&&
2266 !capable(CAP_SYS_RESOURCE
) && !capable(CAP_SYS_ADMIN
))
2267 goto bad_fork_cleanup_count
;
2269 current
->flags
&= ~PF_NPROC_EXCEEDED
;
2272 * If multiple threads are within copy_process(), then this check
2273 * triggers too late. This doesn't hurt, the check is only there
2274 * to stop root fork bombs.
2277 if (data_race(nr_threads
>= max_threads
))
2278 goto bad_fork_cleanup_count
;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#ifdef CONFIG_IO_URING
	p->io_uring = NULL;
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cputimers_init(&p->posix_cputimers);
	tick_dep_init_task(p);

	p->io_context = NULL;
	audit_set_context(p, NULL);
	cgroup_fork(p);
	if (args->kthread) {
		if (!set_kthread_struct(p))
			goto bad_fork_cleanup_delayacct;
	}
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_delayacct;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
	p->irqtrace.hardirq_disable_ip	= _THIS_IP_;
	p->irqtrace.softirq_enable_ip	= _THIS_IP_;
	p->softirqs_enabled		= 1;
	p->softirq_context		= 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif
#ifdef CONFIG_BPF_SYSCALL
	RCU_INIT_POINTER(p->bpf_storage, NULL);
	p->bpf_ctx = NULL;
#endif
	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p, clone_flags);
	if (retval)
		goto bad_fork_sched_cancel_fork;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p, args->no_files);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(p, args);
	if (retval)
		goto bad_fork_cleanup_io;

	stackleak_task_init(p);
	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
				args->set_tid_size);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

	/*
	 * This has to happen after we've potentially unshared the file
	 * descriptor table (so that the pidfd doesn't leak into the child
	 * if the fd table isn't shared).
	 */
	if (clone_flags & CLONE_PIDFD) {
		int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;

		/* Note that no task has been attached to @pid yet. */
		retval = __pidfd_prepare(pid, flags, &pidfile);
		if (retval < 0)
			goto bad_fork_free_pid;
		pidfd = retval;

		retval = put_user(pidfd, args->pidfd);
		if (retval)
			goto bad_fork_put_pidfd;
	}

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_task_syscall_work(p, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(p, SYSCALL_EMU);
#endif
	clear_tsk_latency_tracing(p);
	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	p->task_works = NULL;
	clear_posix_cputimers_work(p);

#ifdef CONFIG_KRETPROBES
	p->kretprobe_instances.first = NULL;
#endif
#ifdef CONFIG_RETHOOK
	p->rethooks.first = NULL;
#endif

	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. It should be noted that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p, args);
	if (retval)
		goto bad_fork_put_pidfd;
	/*
	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
	 * the new task on the correct runqueue. All this *before* the task
	 * becomes visible.
	 *
	 * This isn't part of ->can_fork() because while the re-cloning is
	 * cgroup specific, it unconditionally needs to place the task on a
	 * runqueue.
	 */
	retval = sched_cgroup_fork(p, args);
	if (retval)
		goto bad_fork_cancel_cgroup;

	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
	 * not want user-space to be able to predict the process start-time by
	 * stalling fork(2) after we recorded the start_time but before it is
	 * visible to the system.
	 */

	p->start_time = ktime_get_ns();
	p->start_boottime = ktime_get_boottime_ns();

	/*
	 * Make it visible to the rest of the system, but dont wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
		if (clone_flags & CLONE_THREAD)
			p->exit_signal = -1;
		else
			p->exit_signal = current->group_leader->exit_signal;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
		p->exit_signal = args->exit_signal;
	}

	klp_copy_process(p);
	sched_core_fork(p);

	spin_lock(&current->sighand->siglock);

	rseq_fork(p, clone_flags);

	/* Don't start children in a dying pid namespace */
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_core_free;
	}

	/* Let kill terminate clone/fork in the middle */
	if (fatal_signal_pending(current)) {
		retval = -EINTR;
		goto bad_fork_core_free;
	}
	/* No more failure paths after this point. */

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			current->signal->quick_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}
	total_forks++;
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	sched_post_fork(p);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);
	user_events_fork(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;
bad_fork_core_free:
	sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
bad_fork_cancel_cgroup:
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_sched_cancel_fork:
	sched_cancel_fork(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	exit_creds(p);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	exit_task_stack_account(p);
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}
static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}

static int idle_dummy(void *dummy)
{
	/* This function is never called */
	return 0;
}

struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags		= CLONE_VM,
		.fn		= &idle_dummy,
		.fn_arg		= NULL,
		.kthread	= 1,
		.idle		= 1,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}
/*
 * This is like kernel_clone(), but shaved down and tailored to just
 * creating io_uring workers. It returns a created task, or an error pointer.
 * The returned task is inactive, and the caller must fire it up through
 * wake_up_new_task(p). All signals are blocked in the created task.
 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.io_thread	= 1,
		.user_worker	= 1,
	};

	return copy_process(NULL, 0, node, &args);
}
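
/*
 * Illustrative sketch of the calling convention described above (hypothetical
 * caller and names, not part of this file): the returned task does not run
 * until the caller wakes it.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(io_worker_fn, worker_data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_new_task(tsk);
 */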
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((clone_flags & CLONE_PIDFD) &&
	    (clone_flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
		/* lock the task to synchronize with memcg migration */
		task_lock(p);
		lru_gen_add_mm(p->mm);
		task_unlock(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}
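
/*
 * Userspace illustration of the legacy-clone() rule enforced above (a sketch,
 * not kernel code; child_fn, stack and STACK_SZ are hypothetical): with the
 * libc clone() wrapper, CLONE_PIDFD returns the new pid file descriptor
 * through the parent_tid pointer, which is why that pointer cannot also be
 * used for CLONE_PARENT_SETTID.
 *
 *	int pidfd = -1;
 *	pid_t child;
 *
 *	child = clone(child_fn, stack + STACK_SZ, CLONE_PIDFD | SIGCHLD,
 *		      NULL, &pidfd);
 *	if (child > 0)
 *		poll(&(struct pollfd){ .fd = pidfd, .events = POLLIN }, 1, -1);
 */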
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
		    unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.name		= name,
		.kthread	= 1,
	};

	return kernel_clone(&args);
}
/*
 * Create a user mode thread.
 */
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
	};

	return kernel_clone(&args);
}
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return kernel_clone(&args);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return kernel_clone(&args);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	return kernel_clone(&args);
}
#endif
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t usize)
{
	int err;
	struct clone_args args;
	pid_t *kset_tid = kargs->set_tid;

	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
		     CLONE_ARGS_SIZE_VER0);
	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
		     CLONE_ARGS_SIZE_VER1);
	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
		     CLONE_ARGS_SIZE_VER2);
	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
		return -EINVAL;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
		return -EINVAL;

	if (unlikely(!args.set_tid && args.set_tid_size > 0))
		return -EINVAL;

	if (unlikely(args.set_tid && args.set_tid_size == 0))
		return -EINVAL;

	/*
	 * Verify that higher 32bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	if ((args.flags & CLONE_INTO_CGROUP) &&
	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
		.set_tid_size	= args.set_tid_size,
		.cgroup		= args.cgroup,
	};

	if (args.set_tid &&
	    copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			   (kargs->set_tid_size * sizeof(pid_t))))
		return -EFAULT;

	kargs->set_tid = kset_tid;

	return 0;
}
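
/*
 * Behaviour of the size-based versioning above, as a worked example
 * (illustration only):
 *
 *	usize == CLONE_ARGS_SIZE_VER0: only the fields up to and including
 *		->tls are copied in; the newer fields (set_tid, set_tid_size,
 *		cgroup) stay zero because copy_struct_from_user() zero-fills
 *		the remainder of the kernel struct.
 *	usize > sizeof(struct clone_args): accepted only if userspace passed
 *		zeroes in every byte the kernel does not understand;
 *		otherwise copy_struct_from_user() returns -E2BIG.
 */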
/**
 * clone3_stack_valid - check and prepare stack
 * @kargs: kernel clone args
 *
 * Verify that the stack arguments userspace gave us are sane.
 * In addition, set the stack direction for userspace since it's easy for us to
 * determine.
 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size > 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP)
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}
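
/*
 * Userspace illustration of the stack convention checked above (a sketch,
 * not kernel code; STACK_SZ is hypothetical): clone3() callers always pass
 * the lowest address plus the size, and the kernel applies the direction
 * fixup itself on stack-grows-down architectures.
 *
 *	void *stack = mmap(NULL, STACK_SZ, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *
 *	args.stack	= (__u64)(uintptr_t)stack;
 *	args.stack_size	= STACK_SZ;
 */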
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
	/* Verify that no unknown flags are passed along. */
	if (kargs->flags &
	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
		return false;

	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	if (!clone3_stack_valid(kargs))
		return false;

	return true;
}
/**
 * sys_clone3 - create a new process with specific properties
 * @uargs: argument structure
 * @size:  size of @uargs
 *
 * clone3() is the extensible successor to clone()/clone2().
 * It takes a struct as argument that is versioned by its size.
 *
 * Return: On success, a positive PID for the child process.
 *         On error, a negative errno number.
 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;
	pid_t set_tid[MAX_PID_NS_LEVEL];

#ifdef __ARCH_BROKEN_SYS_CLONE3
#warning clone3() entry point is missing, please fix
	return -ENOSYS;
#endif

	kargs.set_tid = set_tid;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
}
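
/*
 * Userspace illustration of the versioned-by-size calling convention (a
 * sketch, not kernel code): clone3() is typically invoked through syscall(2)
 * since not every libc wraps it.
 *
 *	struct clone_args args = {
 *		.flags		= CLONE_PIDFD,
 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
 *		.exit_signal	= SIGCHLD,
 *	};
 *	pid_t child = syscall(__NR_clone3, &args, sizeof(args));
 *
 * child is 0 in the new process, the child's PID in the parent, and negative
 * on error.
 */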
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto down;
	}
out:
	read_unlock(&tasklist_lock);
}
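
/*
 * Sketch of a proc_visitor callback for walk_process_tree() (hypothetical
 * example, not part of this file): a negative return value aborts the walk,
 * zero moves on without descending below @p, and a positive value descends
 * into @p's children. Because the walk may rescan a parent's children while
 * climbing back up, the callback must return 0 for tasks it has already
 * handled.
 *
 *	static int mark_subtree(struct task_struct *p, void *data)
 *	{
 *		if (p->signal->has_child_subreaper)
 *			return 0;
 *		p->signal->has_child_subreaper = 1;
 *		return 1;
 *	}
 *
 *	walk_process_tree(current, mark_subtree, NULL);
 */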
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init mm_cache_init(void)
{
	unsigned int mm_size;

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
}
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
#ifdef CONFIG_PER_VMA_LOCK
	vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
#endif
	mmap_init();
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare. Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		fd = dup_fd(fd, NULL);
		if (IS_ERR(fd))
			return PTR_ERR(fd);
		*new_fdp = fd;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd)
			swap(current->files, new_fd);

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
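
/*
 * Userspace illustration of the flag implications handled by ksys_unshare()
 * (a sketch, not kernel code): unsharing the mount namespace implies
 * CLONE_FS, and mounts made afterwards stay private to the caller once
 * propagation from the old namespace is turned off. Doing this requires
 * CAP_SYS_ADMIN (or a prior unshare of the user namespace).
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *		mount("tmpfs", "/mnt", "tmpfs", 0, NULL);
 *	}
 */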
/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}
int sysctl_max_threads(const struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
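
/*
 * This handler backs the kernel.threads-max sysctl, so the global thread
 * limit can be inspected or resized at runtime within the
 * [MIN_THREADS, MAX_THREADS] bounds defined above, e.g.:
 *
 *	cat /proc/sys/kernel/threads-max
 *	echo 120000 > /proc/sys/kernel/threads-max
 */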