/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
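/*
 * Editor's sketch (not part of the original file): physical addresses in
 * [ISA_START_ADDRESS, ISA_END_ADDRESS) are covered by the kernel's
 * permanent low mapping, so __ioremap() below returns phys_to_virt()
 * for them instead of building a new vm_area. The legacy VGA text
 * buffer is one such range:
 */
#if 0
static void __iomem *example_map_vga_text(void)
{
	/* 0xb8000 lies inside the always-mapped ISA window, so no
	 * vm_area is created and iounmap() on the result is a no-op. */
	return __ioremap(0xb8000, 0x8000, 0);
}
#endif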
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
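/*
 * Worked example (editor's note): mapping three pages starting at
 * phys_addr 0xe0000000 gives pfn = 0xe0000000 >> PAGE_SHIFT = 0xe0000;
 * the loop then installs ptes for pfns 0xe0000, 0xe0001 and 0xe0002,
 * advancing addr by PAGE_SIZE each pass until addr == end.
 */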
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
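/*
 * Editor's note: the "phys_addr -= addr" at the top of these walkers
 * turns phys_addr into a constant delta, so that "phys_addr + addr"
 * inside the loop always names the physical address paired with the
 * current virtual addr, however far the loop has advanced.
 */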
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr + addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
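/*
 * Editor's sketch (not part of the original file; the addresses are
 * hypothetical): the NOTE above in action. Mapping 0x10 bytes at phys
 * 0xfeb00004 makes __ioremap() round the base down to 0xfeb00000, map
 * one whole page, and return the vm_area address plus the 0x4 offset,
 * so the caller never sees the alignment fixup.
 */
#if 0
static void example_unaligned_mapping(void)
{
	void __iomem *regs = __ioremap(0xfeb00004UL, 0x10, 0);

	if (regs)
		iounmap(regs);	/* iounmap() masks the offset back off */
}
#endif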
/**
 * ioremap_nocache	-	map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
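/*
 * Editor's sketch (not part of the original file; the BAR address and
 * register offset are hypothetical): the usual driver pattern the
 * kernel-doc above describes. Map the registers uncached, go through
 * the mmio helpers rather than plain pointer dereferences, and balance
 * every mapping with iounmap().
 */
#if 0
static u32 example_read_device_status(void)
{
	void __iomem *regs;
	u32 status = 0;

	regs = ioremap_nocache(0xfebf0000UL, PAGE_SIZE);
	if (regs) {
		status = readl(regs + 0x04);	/* hypothetical status register */
		iounmap(regs);
	}
	return status;
}
#endif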
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p) {
		printk("__iounmap: bad address %p\n", addr);
		return;
	}

	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
	kfree(p);
}
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	/* round up past the page that holds last_addr, as in __ioremap() */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
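/*
 * Editor's sketch (not part of the original file; the table address is
 * hypothetical): bt_ioremap() exists for code that runs before
 * mem_init(), when get_vm_area() is not yet usable. It borrows
 * FIX_BTMAP fixmap slots, so every mapping must be released with
 * bt_iounmap() below.
 */
#if 0
static int __init example_scan_firmware_table(void)
{
	void *tbl = bt_ioremap(0xf5b20UL, 64);

	if (!tbl)
		return -ENOMEM;
	/* ... parse the firmware table ... */
	bt_iounmap(tbl, 64);
	return 0;
}
#endif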
void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	/* cover every page touched, matching bt_ioremap()'s rounding */
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}