/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
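
/*
 * Bits cleared from a user PTE when we find that the underlying page is
 * mapped at more than one user virtual address in the same mm.  By default
 * only L_PTE_CACHEABLE is cleared; check_writebuffer_bugs() below may also
 * add L_PTE_BUFFERABLE if the write buffer turns out to have physical
 * address aliasing problems.
 */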
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
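/*
 * adjust_pte() walks the page tables for "address" in the VMA's mm and,
 * if the PTE is present and still carries shared_pte_mask bits, clears
 * those bits so the mapping becomes uncacheable.  It returns nonzero
 * when it actually modified a PTE, zero otherwise.
 */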
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
		ret = 1;
	}

	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}
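
/*
 * make_coherent() scans the other shared mappings of the same file page
 * in this mm (via the address_space prio tree) and calls adjust_pte() on
 * each aliasing address.  If any alias was found and fixed up, the newly
 * faulted-in address is made uncacheable as well; otherwise we only need
 * to flush the cache for the new mapping.
 */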
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernels virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
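/*
 * PG_dcache_dirty is set by flush_dcache_page() when a kernel write to
 * the page could not be pushed out immediately (the page had no user
 * mapping yet); the deferred D-cache flush is performed here, once the
 * page is actually mapped into user space.
 */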
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);

		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
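/*
 * p1 and p2 are two uncached, bufferable virtual aliases of the same
 * physical word.  We write "one" through the first alias and "zero"
 * through the second, then read back through the first: a nonzero
 * result means the read did not observe the second write, i.e. the
 * write buffer is not coherent across physical aliases.
 */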
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}
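
/*
 * Map the same page at two different virtual addresses with uncached,
 * bufferable attributes and run the test above.  If the two aliases are
 * not kept coherent through the write buffer, L_PTE_BUFFERABLE is added
 * to shared_pte_mask so that shared mappings are made unbufferable as
 * well as uncacheable.  "v" defaults to 1 so the work-around is also
 * enabled when the test cannot be run at all.
 */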
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else {
		printk("ok\n");
	}
}