/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
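
/*
 * The remap_area_{pte,pmd,pud,pages} helpers below walk the kernel page
 * tables top-down (pgd -> pud -> pmd -> pte), allocating intermediate
 * levels as needed and filling in the leaf PTEs of the new mapping.
 * Each level masks 'address' down to its offset inside the enclosing
 * table and clamps 'end' so the walk never crosses that table's reach.
 */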
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
			_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
static inline int remap_area_pud(pud_t *pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	/*
	 * Bias phys_addr by -address so that the per-level helpers can
	 * recover the physical address as 'address + phys_addr'.
	 */
	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;

	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page, because
		 * the phys addr can be in a hole between nodes and thus
		 * have no memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}
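
/*
 * Illustrative sketch (not part of the original file): why the fixup
 * above is needed.  If a range of real RAM is ioremapped uncached, the
 * same physical pages would otherwise still be mapped cached through the
 * kernel's linear mapping, and x86 must not alias one page with
 * conflicting cache attributes.  The physical address below is made up.
 */
#if 0	/* example only, never compiled */
	void __iomem *p;

	/* remap a page of RAM uncached; _PAGE_PCD sets the PTE's PCD bit */
	p = __ioremap(0x40000000UL, PAGE_SIZE, _PAGE_PCD);
	/*
	 * __ioremap() then calls ioremap_change_attr(), which rewrites the
	 * linear-mapping PTEs for those pages to carry _PAGE_PCD as well,
	 * keeping both views of the page consistent.
	 */
#endif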
/*
 * Generic mapping function.
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
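
/*
 * Illustrative sketch (not part of the original file): the
 * non-page-aligned case described in the NOTE above.  The device
 * address below is made up.
 */
#if 0	/* example only, never compiled */
	/* registers at phys 0xfebc1080, i.e. 0x80 into a page */
	void __iomem *regs = __ioremap(0xfebc1080UL, 0x80, 0);

	/*
	 * Internally phys_addr was rounded down to 0xfebc1000, a whole
	 * page was mapped, and the 0x80 offset was added back to the
	 * returned pointer, so readl(regs) hits the first register.
	 */
#endif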
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
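
/*
 * Illustrative sketch (not part of the original file): typical driver
 * usage of ioremap_nocache() on a PCI BAR, assuming a struct pci_dev
 * *pdev is in scope.  The register offsets are hypothetical.
 */
#if 0	/* example only, never compiled */
	unsigned long bar = pci_resource_start(pdev, 0);
	unsigned long len = pci_resource_len(pdev, 0);
	void __iomem *mmio = ioremap_nocache(bar, len);
	u32 status;

	if (!mmio)
		return -ENOMEM;
	writel(1, mmio + 0x04);		/* hypothetical control register */
	status = readl(mmio + 0x08);	/* hypothetical status register */
	iounmap(mmio);			/* must be paired with iounmap */
#endif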
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, **pprev;

	if ((void __force *)addr <= high_memory)
		return;
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	write_lock(&vmlist_lock);
	for (p = vmlist, pprev = &vmlist; p != NULL;
	     pprev = &p->next, p = *pprev)
		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
			break;
	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		goto out_unlock;
	}
	*pprev = p->next;
	unmap_vm_area(p);
	if ((p->flags >> 20) &&
	    p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);	/* kfree(NULL) is a no-op on the bad-address path */
}