/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
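
/*
 * PTE bits we clear on a page that has other shared user mappings:
 * always L_PTE_CACHEABLE; check_writebuffer_bugs() below additionally
 * adds L_PTE_BUFFERABLE when the write-buffer coherency test fails
 * (or cannot be run).
 */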
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte(pte, entry);
		flush_tlb_page(vma, address);
		ret = 1;
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}
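
/*
 * Called from update_mmu_cache() for VIVT caches: walk the other user
 * mappings of this page and, for any that are shared and belong to the
 * same mm, make the mappings uncacheable so they cannot alias in the
 * data cache.
 */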
static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
	struct address_space *mapping = page_mapping(page);
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	if (!mapping)
		return;
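
	/*
	 * pgoff is the faulting page's offset within the mapped object,
	 * used below to find every other VMA that maps the same page.
	 */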
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
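
	/*
	 * If any other shared mapping had to be made uncacheable, make the
	 * faulting mapping uncacheable as well so that every view of the
	 * page stays consistent; otherwise a cache flush of this page is
	 * enough.
	 */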
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, page_to_pfn(page));
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty) {
			/*
			 * This is our first userspace mapping of this page.
			 * Ensure that the physical page is coherent with
			 * the kernel mapping.
			 *
			 * FIXME: only need to do this on VIVT and aliasing
			 *  VIPT cache architectures.  We can do that
			 *  by choosing whether to set this bit...
			 */
			__cpuc_flush_dcache_page(page_address(page));
		}
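
		/*
		 * Only VIVT data caches can hold aliases of the same page
		 * under different user-space addresses, so the shared
		 * mapping fix-up is limited to them.
		 */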
		if (cache_is_vivt())
			make_coherent(vma, addr, page, dirty);
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;
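
	/*
	 * p1 and p2 are two uncacheable, bufferable mappings of the same
	 * physical word.  Write 1 through one mapping and 0 through the
	 * other, then read back through the first: if the write buffer
	 * handles the physical alias correctly, the read must see the 0.
	 */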
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();

	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}
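
	/*
	 * v is still non-zero if the test failed or could not be run at
	 * all, in which case shared mappings must also lose the write
	 * buffer, not just the cache.
	 */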
	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else {
		printk("ok\n");
	}
}