/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
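
/*
 * Worked example (illustrative only; assumes PAGE_SHIFT == 12 and
 * SHMLBA == 4 * PAGE_SIZE == 0x4000, as on ARMv6):
 *
 *   COLOUR_ALIGN(0x5000, 3)
 *     = ((0x5000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *     = 0x8000 + 0x3000
 *     = 0xb000
 *
 * i.e. the hint is rounded up to the next SHMLBA boundary and then
 * offset so that page 3 of the object keeps the same cache colour
 * in every mapping.
 */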

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 of the
	 * cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
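		/*
		 * cache_type >> 12 folds the D-cache size field ([23:12])
		 * down over the I-cache size field ([11:0]), so the single
		 * (1 << 11) mask tests the P (aliasing) bit of both caches
		 * at once.
		 */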
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
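
	/*
	 * Illustrative example (again assuming SHMLBA == 0x4000): a
	 * MAP_FIXED, MAP_SHARED request with addr == 0x9000 and pgoff == 1
	 * computes (0x9000 - 0x1000) & 0x3fff == 0, so the hint is
	 * accepted; with addr == 0xa000 the result is 0x1000, so it is
	 * rejected with -EINVAL on an aliasing cache.
	 */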

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
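
	/*
	 * mm->cached_hole_size is the size of the largest hole already
	 * skipped below mm->free_area_cache.  If the request cannot fit
	 * in any of those holes (len > cached_hole_size), resume from the
	 * cached position; otherwise restart from TASK_UNMAPPED_BASE so a
	 * previously skipped hole can be reused.
	 */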
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if (current->flags & PF_RANDOMIZE)
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
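	/*
	 * With 4K pages this adds 0-255 pages of offset, i.e. up to
	 * 0xff000 bytes (just under 1MB) of jitter within the 20 bits
	 * of address space mentioned above; the alignment below then
	 * rounds the result back to a page or SHMLBA boundary.
	 */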

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
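
/*
 * Note: the upper bound above is written as __pa(high_memory - 1) + 1
 * rather than __pa(high_memory) so the virtual-to-physical conversion
 * is only applied to an address that is actually inside the kernel's
 * direct mapping, even when high_memory is one byte past its end.
 */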

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
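
/*
 * With PAGE_SHIFT == 12, pfn 0x00100000 corresponds to exactly 4GB
 * (0x00100000 << 12 == 0x100000000), so the check above rejects any
 * mapping that would extend past the 4GB boundary.
 */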

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif