// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>

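/*
 * Note: va_align (used throughout this file) is declared in <asm/elf.h>;
 * its enable flags, mask and per-boot random bits are filled in by the
 * AMD CPU setup code for the affected families.
 */
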
/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}

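/*
 * Round a vDSO load address up to the required alignment boundary and OR
 * in the per-boot random bits described above.
 */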
unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();

        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}

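/*
 * Parse the "align_va_addr=" kernel parameter: "32"/"64" restrict the
 * extra alignment to 32-bit/64-bit mmap()s respectively, "on" enables it
 * for both and "off" disables it entirely.
 */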
static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

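/*
 * The 64-bit mmap() system call: reject byte offsets that are not
 * page-aligned, then hand the page-based offset to the generic
 * ksys_mmap_pgoff() helper.
 */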
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}

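/*
 * Compute the [begin, end) window that the bottom-up allocator below may
 * search. MAP_32BIT requests from 64-bit syscalls are confined to a window
 * below 2GB; everything else starts at the mmap base and is bounded by the
 * 32-bit or 64-bit task size.
 */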
static void find_start_end(unsigned long addr, unsigned long flags,
                unsigned long *begin, unsigned long *end)
{
        if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
                /*
                 * This is usually needed to map code in the small code
                 * model, so it needs to be in the first 31 bits. Limit
                 * it to that. This means we need to move the unmapped
                 * base down for this case. This can give conflicts
                 * with the heap, but we assume that glibc malloc knows
                 * how to fall back to mmap. Give it 1GB of playground
                 * for now. -AK
                 */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
                return;
        }

        *begin = get_mmap_base(1);
        if (in_32bit_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}

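/*
 * Bottom-up search for a free virtual address range. A MAP_FIXED request
 * or a usable hint address short-circuits the search; otherwise the window
 * from find_start_end() is handed to vm_unmapped_area(), with the extra I$
 * alignment applied only to file-backed mappings.
 */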
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(addr, flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}

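/*
 * Top-down search used for the default (non-legacy) mmap layout. MAP_32BIT
 * requests fall back to the bottom-up path above, and a hint address above
 * DEFAULT_MAP_WINDOW extends the search into the full address space
 * (relevant with 5-level paging).
 */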
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!in_32bit_syscall() && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr &= PAGE_MASK;
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
get_unmapped_area:

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         *
         * The !in_32bit_syscall() check avoids high addresses for x32
         * (and makes it a no-op on native i386).
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}