// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}
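
/*
 * va_align is set up by the CPU init code (AMD F15h) and va_align.flags
 * stays negative on other CPU families; ALIGN_VA_32 and ALIGN_VA_64
 * select whether 32-bit and/or 64-bit mmap() calls get the alignment
 * treatment.
 */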

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly to the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}
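
/*
 * Illustrative example: if CPU setup chose an upper bit of 15, the mask
 * covers VA bits [12:15) (0x7000). With a per-boot va_align.bits of,
 * say, 0x3000, a pgoff-0 file mapping comes back from
 * vm_unmapped_area() with (addr & 0x7000) == 0x3000: the
 * aliasing-relevant bits are fixed for the whole boot, but randomized
 * across boots.
 */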

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
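
/*
 * "align_va_addr=" is a kernel command line parameter: e.g. booting
 * with align_va_addr=off disables the F15h alignment handling, while
 * align_va_addr=32 restricts it to 32-bit mmap() calls.
 */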

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
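
/*
 * Pick the VA window [*begin, *end) that the bottom-up allocator may
 * search, taking MAP_32BIT requests and 32-bit syscalls into account.
 */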
static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used to map code in small model,
		   so it needs to be in the first 31 bits. Limit it
		   to that. This means we need to move the unmapped
		   base down for this case. This can give conflicts
		   with the heap, but we assume that glibc malloc
		   knows how to fall back to mmap. Give it 1GB of
		   playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
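
/*
 * Shadow-stack mappings get a PAGE_SIZE gap placed in front of them so
 * that shadow stack operations running past the mapping fault instead
 * of landing in an adjacent mapping.
 */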
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
		       unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;
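
	/*
	 * If a hint was given, honor it when the request fits below the
	 * window end and does not collide with an existing VMA or its
	 * stack guard gap.
	 */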
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.start_gap = stack_guard_placement(vm_flags);
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
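
/*
 * Top-down variant, the default for non-legacy mmap layouts: search
 * downwards from the mmap base and fall back to the bottom-up
 * allocator if that fails.
 */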
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	if (!in_32bit_syscall() && (flags & MAP_ABOVE4G))
		info.low_limit = SZ_4G;
	else
		info.low_limit = PAGE_SIZE;

	info.high_limit = get_mmap_base(0);
	info.start_gap = stack_guard_placement(vm_flags);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 *
	 * !in_32bit_syscall() check to avoid high addresses for x32
	 * (and make it a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
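	/*
	 * vm_unmapped_area() returns either a page-aligned address on
	 * success or a negative error code, so any non-page-aligned
	 * value must be an error.
	 */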
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags, 0);
}