/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
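
/*
 * Align addr to the next shm_align_mask boundary, then add the colour
 * offset implied by pgoff, so shared mappings of the same file page
 * always land on the same virtual cache colour.
 */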
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};
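
/*
 * Shared worker for both mmap layouts: validate the request, honour
 * MAP_FIXED and address hints (with cache colouring for shared mappings),
 * then ask vm_unmapped_area() for a gap in the direction given by dir.
 */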
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;

		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
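
	/* No usable hint; prepare a colour-aligned gap search. */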
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
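
	/*
	 * Top-down: search below mmap_base, but keep low_limit at PAGE_SIZE
	 * so page 0 stays unmapped and NULL dereferences still fault.
	 */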
	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
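
/* Bottom-up (legacy) layout: allocate upward from mmap_base. */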
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}
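
/*
 * Reject anything outside [PAGE_OFFSET, MAP_BASE): user addresses below
 * and mapped (vmalloc/ioremap) addresses above cannot be linearly
 * translated, so only the remaining range is checked against pfn_valid().
 */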
bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);