/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
9 #include <linux/compiler.h>
10 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/module.h>
14 #include <linux/personality.h>
15 #include <linux/random.h>
16 #include <linux/sched.h>
18 unsigned long shm_align_mask
= PAGE_SIZE
- 1; /* Sane caches */
19 EXPORT_SYMBOL(shm_align_mask
);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
25 static int mmap_is_legacy(void)
27 if (current
->personality
& ADDR_COMPAT_LAYOUT
)
30 if (rlimit(RLIMIT_STACK
) == RLIM_INFINITY
)
33 return sysctl_legacy_va_layout
;
36 static unsigned long mmap_base(unsigned long rnd
)
38 unsigned long gap
= rlimit(RLIMIT_STACK
);
42 else if (gap
> MAX_GAP
)
45 return PAGE_ALIGN(TASK_SIZE
- gap
- rnd
);
/*
 * Round @addr up to the cache-colour boundary and then add the colour
 * implied by the file offset @pgoff, so the mapping aliases cleanly.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
/* Search direction for arch_get_unmapped_area_common(). */
enum mmap_allocation_direction {UP, DOWN};
54 static unsigned long arch_get_unmapped_area_common(struct file
*filp
,
55 unsigned long addr0
, unsigned long len
, unsigned long pgoff
,
56 unsigned long flags
, enum mmap_allocation_direction dir
)
58 struct mm_struct
*mm
= current
->mm
;
59 struct vm_area_struct
*vma
;
60 unsigned long addr
= addr0
;
62 struct vm_unmapped_area_info info
;
64 if (unlikely(len
> TASK_SIZE
))
67 if (flags
& MAP_FIXED
) {
68 /* Even MAP_FIXED mappings must reside within TASK_SIZE */
69 if (TASK_SIZE
- len
< addr
)
73 * We do not accept a shared mapping if it would violate
74 * cache aliasing constraints.
76 if ((flags
& MAP_SHARED
) &&
77 ((addr
- (pgoff
<< PAGE_SHIFT
)) & shm_align_mask
))
83 if (filp
|| (flags
& MAP_SHARED
))
86 /* requesting a specific address */
89 addr
= COLOUR_ALIGN(addr
, pgoff
);
91 addr
= PAGE_ALIGN(addr
);
93 vma
= find_vma(mm
, addr
);
94 if (TASK_SIZE
- len
>= addr
&&
95 (!vma
|| addr
+ len
<= vma
->vm_start
))
100 info
.align_mask
= do_color_align
? (PAGE_MASK
& shm_align_mask
) : 0;
101 info
.align_offset
= pgoff
<< PAGE_SHIFT
;
104 info
.flags
= VM_UNMAPPED_AREA_TOPDOWN
;
105 info
.low_limit
= PAGE_SIZE
;
106 info
.high_limit
= mm
->mmap_base
;
107 addr
= vm_unmapped_area(&info
);
109 if (!(addr
& ~PAGE_MASK
))
113 * A failed mmap() very likely causes application failure,
114 * so fall back to the bottom-up function here. This scenario
115 * can happen with large stack limits and large mmap()
121 info
.low_limit
= mm
->mmap_base
;
122 info
.high_limit
= TASK_SIZE
;
123 return vm_unmapped_area(&info
);
126 unsigned long arch_get_unmapped_area(struct file
*filp
, unsigned long addr0
,
127 unsigned long len
, unsigned long pgoff
, unsigned long flags
)
129 return arch_get_unmapped_area_common(filp
,
130 addr0
, len
, pgoff
, flags
, UP
);
134 * There is no need to export this but sched.h declares the function as
135 * extern so making it static here results in an error.
137 unsigned long arch_get_unmapped_area_topdown(struct file
*filp
,
138 unsigned long addr0
, unsigned long len
, unsigned long pgoff
,
141 return arch_get_unmapped_area_common(filp
,
142 addr0
, len
, pgoff
, flags
, DOWN
);
145 unsigned long arch_mmap_rnd(void)
149 rnd
= (unsigned long)get_random_int();
151 if (TASK_IS_32BIT_ADDR
)
159 void arch_pick_mmap_layout(struct mm_struct
*mm
)
161 unsigned long random_factor
= 0UL;
163 if (current
->flags
& PF_RANDOMIZE
)
164 random_factor
= arch_mmap_rnd();
166 if (mmap_is_legacy()) {
167 mm
->mmap_base
= TASK_UNMAPPED_BASE
+ random_factor
;
168 mm
->get_unmapped_area
= arch_get_unmapped_area
;
170 mm
->mmap_base
= mmap_base(random_factor
);
171 mm
->get_unmapped_area
= arch_get_unmapped_area_topdown
;
175 static inline unsigned long brk_rnd(void)
177 unsigned long rnd
= get_random_int();
179 rnd
= rnd
<< PAGE_SHIFT
;
180 /* 8MB for 32bit, 256MB for 64bit */
181 if (TASK_IS_32BIT_ADDR
)
182 rnd
= rnd
& 0x7ffffful
;
184 rnd
= rnd
& 0xffffffful
;
189 unsigned long arch_randomize_brk(struct mm_struct
*mm
)
191 unsigned long base
= mm
->brk
;
194 ret
= PAGE_ALIGN(base
+ brk_rnd());
/*
 * Report whether @kaddr is a valid, directly-mapped kernel virtual
 * address, i.e. whether its physical frame has a struct page.
 */
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);