/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

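/*
 * Use the legacy bottom-up layout when the personality requests it
 * (ADDR_COMPAT_LAYOUT), when the stack rlimit is unlimited, or when
 * the legacy_va_layout sysctl is set.
 */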
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

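/*
 * For the top-down layout the mmap area starts just below the stack:
 * TASK_SIZE minus the stack rlimit (clamped to [MIN_GAP, MAX_GAP])
 * minus the random offset.
 */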
static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

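/*
 * Round addr down to an address that has the same cache colour as the
 * page offset pgoff, so that shared mappings of the same file do not
 * alias in a virtually indexed cache.
 */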
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        if (base + off <= addr)
                return base + off;

        return base - off;
}

#define COLOUR_ALIGN(addr, pgoff)				\
        ((((addr) + shm_align_mask) & ~shm_align_mask) +	\
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

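/*
 * COLOUR_ALIGN is the round-up counterpart of COLOUR_ALIGN_DOWN: the
 * next address at or above addr with the cache colour matching pgoff.
 */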
enum mmap_allocation_direction {UP, DOWN};

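/*
 * Common helper for arch_get_unmapped_area() and
 * arch_get_unmapped_area_topdown(): UP searches bottom-up from
 * mm->mmap_base, DOWN searches top-down below mm->mmap_base.  Both
 * honour MAP_FIXED and the cache colouring constraints above.
 */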
static unsigned long arch_get_unmapped_area_foo(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                        /* At this point:  (!vma || addr < vma->vm_end). */
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vma->vm_start)
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                /* check if free_area_cache is useful for us */
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /* either no address requested or can't fit in requested address hole */
                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                /* make sure it can fit in the remaining address space */
                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vma->vm_start) {
                                /* remember the address as a hint for next time */
                                return mm->free_area_cache = addr - len;
                        }
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        /*
                         * Lookup failure means no vma is above this address,
                         * else if new region fits below vma->vm_start,
                         * return with success:
                         */
                        vma = find_vma(mm, addr);
                        if (likely(!vma || addr + len <= vma->vm_start)) {
                                /* remember the address as a hint for next time */
                                return mm->free_area_cache = addr;
                        }

                        /* remember the largest hole we saw so far */
                        if (addr + mm->cached_hole_size < vma->vm_start)
                                mm->cached_hole_size = vma->vm_start - addr;

                        /* try just below the current vma->vm_start */
                        addr = vma->vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vma->vm_start));

bottomup:
                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here.  This
                 * scenario can happen with large stack limits and large
                 * mmap() allocations.
                 */
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
                /*
                 * Restore the topdown base:
                 */
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_foo(filp,
                        addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_foo(filp,
                        addr0, len, pgoff, flags, DOWN);
}

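/*
 * Select the mmap layout for a new mm: the legacy bottom-up layout
 * starting at TASK_UNMAPPED_BASE, or the top-down layout below the
 * stack gap.  With PF_RANDOMIZE the base is offset by a page-aligned
 * random amount (up to roughly 16MB on 32-bit, 256MB on 64-bit
 * address spaces, per the masks below).
 */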
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

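/*
 * Random offset used by arch_randomize_brk() below when moving the
 * initial program break.
 */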
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

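/*
 * Randomize the initial program break: page-align mm->brk plus a
 * random offset, falling back to the current brk if the result would
 * move it backwards.
 */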
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}