/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>
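
/*
 * Virtual alias mask used by COLOUR_ALIGN() and the align_mask handling
 * below.  The PAGE_SIZE - 1 default ("sane caches") means plain page
 * alignment is enough; a larger value forces shared mappings onto matching
 * D-cache colours (the mask is expected to be raised elsewhere for parts
 * with aliasing caches).
 */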
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					  unsigned long pgoff)
{
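	/*
	 * Round addr up to the start of the next alias region, then add the
	 * colour of the file offset so that the returned address and
	 * (pgoff << PAGE_SHIFT) agree in the bits covered by shm_align_mask.
	 */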
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;
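
	/*
	 * File-backed and shared mappings can map the same physical page at
	 * more than one virtual address, so they must be colour aligned.
	 * Private anonymous mappings cannot alias and only need page
	 * alignment.
	 */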
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
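
	/*
	 * No usable hint: search bottom-up from TASK_UNMAPPED_BASE.  With
	 * colour alignment, align_mask/align_offset make vm_unmapped_area()
	 * return an address whose colour matches that of the file offset
	 * (pgoff << PAGE_SHIFT).
	 */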
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
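
	/*
	 * No usable hint: search top-down, from just below mm->mmap_base
	 * towards PAGE_SIZE, with the same colour constraint as the
	 * bottom-up case.
	 */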
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
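	/*
	 * Only addresses covering system RAM are valid: at or above
	 * __MEMORY_START and below the physical address corresponding to
	 * high_memory (the top of the directly mapped region).
	 */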
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
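
/*
 * No extra restriction is placed on mmap() of /dev/mem ranges here; any
 * physical range is reported as mappable.
 */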
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}