// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
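
/*
 * Maximum number of bytes by which the stack start may be shifted by
 * randomization; zero when randomization is disabled for this task.
 */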
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
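
/*
 * Decide between the legacy bottom-up mmap layout and the default
 * top-down layout: legacy is used for ADDR_COMPAT_LAYOUT personalities,
 * for unlimited stack rlimits, and when the legacy_va_layout sysctl is set.
 */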
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
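
/* Page-aligned random offset applied to the mmap base for ASLR. */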
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
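
/* Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus the random offset. */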
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}
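
/*
 * Top-down layout: place the mmap base below the process stack, leaving
 * room for stack growth (stack rlimit plus randomization and guard gap),
 * clamped between a 32 MB minimum and 5/6 of STACK_TOP.
 */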
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
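
/*
 * Bottom-up search for a free address range. A usable hint address is
 * honoured, and the page tables are upgraded if the mapping ends above
 * the current ASCE limit.
 */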
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}
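
/*
 * Top-down search for a free address range below mm->mmap_base, falling
 * back to a bottom-up search if that fails; like the bottom-up variant
 * it may upgrade the page tables for mappings above the ASCE limit.
 */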
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}