/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
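
/*
 * Worked example, assuming 4KB pages and SHMLBA == 4 * PAGE_SIZE ==
 * 0x4000 (four page colours):
 *
 *	COLOUR_ALIGN(0x40001000, 1)
 *	  = ((0x40001000 + 0x3fff) & ~0x3fff) + ((1 << 12) & 0x3fff)
 *	  = 0x40004000 + 0x1000
 *	  = 0x40005000
 *
 * Page 1 of the object ends up at offset 0x1000 within its 16KB
 * colour window, so every mapping of that page has the same colour.
 */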

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
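		/*
		 * cache_type >> 12 folds the Dsize field (CTR bits
		 * 23:12) onto the Isize field (bits 11:0), so a single
		 * mask can test the corresponding bit of both cache
		 * descriptions at once.
		 */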
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case.
	 */
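	/*
	 * For aliasing caches, a fixed shared mapping is acceptable
	 * only if the requested address already has the right colour,
	 * i.e. addr and pgoff << PAGE_SHIFT agree modulo SHMLBA.
	 */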
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
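
	/*
	 * First-fit search with caching: free_area_cache remembers
	 * where the previous search finished, and cached_hole_size the
	 * largest hole seen below it.  A request small enough to fit
	 * in that hole restarts from TASK_UNMAPPED_BASE so the hole
	 * can be reused.
	 */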
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
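	/*
	 * E.g. with 4KB pages this adds 0..255 pages, i.e. at most
	 * 0xff000 bytes (just under 1MB), to the search start address.
	 */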
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
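		/*
		 * Record the largest hole we have stepped over so the
		 * len > cached_hole_size test above full_search can
		 * reuse it for smaller requests.
		 */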
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
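	/*
	 * The range must lie entirely within directly-mapped RAM,
	 * from PHYS_OFFSET up to the physical address backing
	 * high_memory.
	 */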
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
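	/*
	 * 0x00100000 page frames of PAGE_SIZE each: with 4KB pages
	 * this is the 4GiB boundary (0x100000 << 12 == 1ULL << 32).
	 */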
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
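	/*
	 * Exclusive MMIO regions are denied, system RAM is denied,
	 * and any other (non-exclusive MMIO) page is allowed.
	 */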
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif