arch/x86/kernel/sys_x86_64.c

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}
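
/*
 * Editor's note (not part of the original file): the single conditional
 * above relies on the flag encoding ALIGN_VA_32 == 1 and
 * ALIGN_VA_64 == 2, so that (2 - mmap_is_ia32()) evaluates to
 * ALIGN_VA_64 for a 64-bit task (mmap_is_ia32() == 0) and to
 * ALIGN_VA_32 for a 32-bit one, testing the right bit in one step.
 */
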
unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	return (addr + align_mask) & ~align_mask;
}
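
/*
 * Worked example (editor's illustration, not part of the original file):
 * with a hypothetical align_mask of 0x7fff (a 32K aliasing granule),
 * align_vdso_addr() rounds an address up to the next 32K boundary:
 *
 *	(0x7f1234561000 + 0x7fff) & ~0x7fff == 0x7f1234568000
 *
 * An align_mask of 0 (alignment disabled) leaves the address unchanged.
 */
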
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
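
/*
 * Usage sketch (editor's note): the handler above parses the
 * "align_va_addr" kernel command-line option, e.g.:
 *
 *	align_va_addr=32	align for 32-bit tasks only
 *	align_va_addr=64	align for 64-bit tasks only
 *	align_va_addr=on	align for 32- and 64-bit tasks
 *	align_va_addr=off	disable the alignment entirely
 *
 * Returning 1 tells the boot-time option parser the string was consumed.
 */
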
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
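
/*
 * Userspace sketch (editor's illustration, assuming an already-open fd):
 * the EINVAL check above rejects byte offsets that are not page-aligned,
 * so callers pass a page-multiple offset, e.g.:
 *
 *	void *p = mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 4096);
 *
 * A call with off = 4095 fails with -EINVAL, since
 * 4095 & ~PAGE_MASK != 0 on a 4K-page build.
 */
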
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in the small
		   model, so it needs to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = current->mm->mmap_legacy_base;
		*end = TASK_SIZE;
	}
}
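
/*
 * Worked example (editor's note): for a 64-bit task using MAP_32BIT,
 * the search window is [0x40000000, 0x80000000), i.e. the 1GB of
 * "playground" described above. With PF_RANDOMIZE set, the window
 * start is shifted by up to 32MB (0x02000000), so *begin may land
 * anywhere in [0x40000000, 0x42000000), page-aligned.
 */
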
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
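
/*
 * Editor's note on the info setup above: align_mask is only applied to
 * file-backed mappings (filp != NULL), and align_offset biases the
 * search so the chosen address is congruent to the file offset modulo
 * the alignment granule. E.g. with pgoff == 3 the offset is 0x3000, so
 * an aligned search still keeps the low address bits matching the file
 * position, which is what the I$ anti-aliasing needs.
 */
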
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
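
/*
 * Editor's note: the "!(addr & ~PAGE_MASK)" test above works because
 * vm_unmapped_area() returns either a page-aligned address (low bits
 * clear) or a negated errno whose low bits are set, e.g.:
 *
 *	(unsigned long)-ENOMEM == 0xfffffffffffffff4
 *	0xfffffffffffffff4 & ~PAGE_MASK == 0xff4 != 0
 *
 * so any non-page-aligned result is an error, and -ENOMEM is the only
 * error the VM_BUG_ON() expects before falling back to bottom-up.
 */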