// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
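
/*
 * Worked example (assuming 4 KiB pages, so SHMLBA = 16 KiB on ARM):
 * COLOUR_ALIGN(0x5000, 3) rounds 0x5000 up to 0x8000, then adds the
 * colour offset (3 << 12) & 0x3fff = 0x3000, giving 0xb000.  Page 0
 * of the object then corresponds to 0xb000 - 0x3000 = 0x8000, a
 * multiple of SHMLBA, so every mapping of a given page of the object
 * lands on the same cache colour.
 */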
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
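
/*
 * The stack gap is clamped to [MIN_GAP, MAX_GAP] in mmap_base() below:
 * at least 128 MiB, at most 5/6 of the user address space (e.g. about
 * 2.5 GiB with a 3 GiB TASK_SIZE).
 */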
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at a multiple of SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
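
	/*
	 * Note: any file-backed mapping is coloured, even MAP_PRIVATE,
	 * since the underlying page cache pages can be mapped elsewhere
	 * and all user mappings of a physical page must share a colour.
	 */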
	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
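
/*
 * With 4 KiB pages and a 16 KiB SHMLBA, PAGE_MASK & (SHMLBA - 1) is
 * 0x3000: vm_unmapped_area() then picks an address whose colour bits
 * match those of align_offset (pgoff << PAGE_SHIFT), preserving the
 * file-offset-to-colour relationship established above.
 */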
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
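
/*
 * arch_mmap_rnd() returns a page-granular random offset of up to
 * (1 << mmap_rnd_bits) - 1 pages; mmap_rnd_bits typically defaults
 * to 8 on ARM (an assumption here), i.e. up to roughly 1 MiB of base
 * randomization with 4 KiB pages.
 */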
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
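
/*
 * Two layouts: legacy places mmap_base just above TASK_UNMAPPED_BASE
 * and grows mappings upwards; the default places it below the stack
 * gap and grows downwards.  Legacy is kept for ADDR_COMPAT_LAYOUT
 * personalities and for unlimited stacks, where no sensible gap can
 * be computed.
 */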
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
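
/*
 * PHYS_MASK reflects the architecturally addressable physical range
 * (assumed here: 40 bits with LPAE, 32 bits without), so the check
 * above caps mappings at what the CPU can address, not at populated
 * RAM.
 */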
#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif