/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
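
/*
 * Bits cleared from a PTE once the page it maps gains several
 * user-space mappings.  Clearing L_PTE_CACHEABLE makes those mappings
 * uncacheable, so a virtually-indexed (VIVT) data cache can never hold
 * two inconsistent copies of the same physical page.  If the boot-time
 * test below finds that the write buffer also aliases by virtual
 * address, check_writebuffer_bugs() adds L_PTE_BUFFERABLE to this
 * mask as well.
 */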
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte(pte, entry);
		flush_tlb_page(vma, address);
		ret = 1;
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}
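
/*
 * Walk every other shared mapping of this page inside the current mm
 * and make each one uncacheable via adjust_pte().  If at least one
 * alias was fixed up, the faulting mapping itself is made uncacheable
 * too; otherwise flushing the cache for this page is sufficient.
 */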
static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
	struct address_space *mapping = page_mapping(page);
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	if (!mapping)
		return;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, page_to_pfn(page));
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernels virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty) {
			/*
			 * This is our first userspace mapping of this page.
			 * Ensure that the physical page is coherent with
			 * the kernel mapping.
			 *
			 * FIXME: only need to do this on VIVT and aliasing
			 * VIPT cache architectures.  We can do that
			 * by choosing whether to set this bit...
			 */
			__cpuc_flush_dcache_page(page_address(page));
		}

		make_coherent(vma, addr, page, dirty);
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}
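
/*
 * check_writebuffer_bugs() maps one physical page at two different
 * virtual addresses, p1 and p2, and runs the test above: one is
 * written through *p1, zero through *p2, and the value is read back
 * through *p1.  Both pointers name the same physical word, so a
 * coherent system reads back zero; seeing the stale one means the
 * write buffer aliases by virtual address.  The test mappings are
 * uncacheable but bufferable, which isolates the write buffer, and
 * on failure shared PTEs are made unbufferable as well.
 */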
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else
		printk("ok\n");
}
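
/*
 * Call-site note (an assumption based on standard 2.6-era kernels, not
 * stated in this file): update_mmu_cache() is invoked by the generic
 * MM code after a PTE has been installed or changed, and
 * check_writebuffer_bugs() runs once at boot via the ARM check_bugs()
 * hook.
 */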