/*
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
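
/*
 * PAGE_SIZE - 1 is the "no extra colouring" default.  On CPUs whose VIPT
 * cache ways span more than one page, the boot-time cache setup is
 * expected to widen this mask to the alias granularity (an assumption
 * about the setup path; this file only consumes the value).
 */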

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
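
/*
 * Illustrative numbers (assumed, not taken from this file): with 4 KiB
 * pages and shm_align_mask = 0x3fff (16 KiB of colouring), addr =
 * 0x10001000 and pgoff = 3 give base = 0x10004000 and off = 0x3000, so
 * COLOUR_ALIGN() returns 0x10007000 -- at or above addr, and on the same
 * cache colour as file offset pgoff.
 */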

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;
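
	/*
	 * Colouring is applied to file-backed and shared mappings only:
	 * those may gain a second mapping of the same physical page, so
	 * they must sit on a matching cache colour.  Private anonymous
	 * memory has no alias to collide with.
	 */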

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
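
/*
 * With the align_mask/align_offset pair above, vm_unmapped_area() picks
 * an address satisfying (addr & align_mask) == (align_offset & align_mask),
 * i.e. the new mapping shares a cache colour with the file offset it maps.
 */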

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
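
	/*
	 * vm_unmapped_area() returns a page-aligned address on success and
	 * a negative errno on failure; an errno always has bits set in
	 * ~PAGE_MASK, which is what the check below relies on.
	 */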

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
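
/*
 * mmap() of /dev/mem, by contrast, is not range-checked here: any PFN is
 * reported as valid and the MMU mapping layer is left to reject it.
 */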
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}