[linux-2.6/verdex.git] / arch/m32r/mm/ioremap.c
blob 70c59055c19c179a6ad316bec94810a6e077dd42
/*
 *  linux/arch/m32r/mm/ioremap.c
 *
 *  Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 *  Taken from the mips version.
 *    (C) Copyright 1995 1996 Linus Torvalds
 *    (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				   | _PAGE_WRITE | flags);

	/* Operate within a single PMD-sized region; the caller walks PMDs. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
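
/*
 * A worked example of the masking above, assuming this port's usual
 * 4KB pages and 4MB PMD regions (PAGE_SIZE = 0x1000, PMD_SIZE =
 * 0x400000; values illustrative only). For address = 0xc0402000 and
 * size = 0x3000:
 *
 *	address &= ~PMD_MASK;	->  0x2000 (offset within the PMD region)
 *	end = address + size;	->  0x5000, below PMD_SIZE, so no clamp
 *
 * The loop then fills three consecutive PTEs (for 0x2000, 0x3000 and
 * 0x4000) and stops at end; anything past PMD_SIZE is left for the
 * caller, remap_area_pmd(), to handle with the next PMD entry.
 */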
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Rebase so (address + phys_addr) is always the backing physical. */
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
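
/*
 * The "phys_addr -= address" above is the usual rebasing trick: after
 * it, (address + phys_addr) always equals the physical address that
 * should back the virtual address currently being mapped, so the
 * per-PTE call can keep passing "address + phys_addr" while address
 * advances one PMD region at a time. For instance (values illustrative
 * only), with address = 0x2000 after masking and phys_addr =
 * 0x04002000, the rebased phys_addr becomes 0x04000000 and the first
 * call maps offset 0x2000 to physical 0x04002000, as intended.
 */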
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
	unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))
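
/*
 * IS_LOW512(addr) is true exactly when addr fits in the low 512MB,
 * i.e. no bits above bit 28 are set:
 *
 *	IS_LOW512(0x1fffffff)	-> true  (last byte of the low 512MB)
 *	IS_LOW512(0x20000000)	-> false (bit 29 set)
 *
 * Such addresses are reachable through the uncached KSEG1 window with
 * no page tables at all; on this port KSEG1ADDR() is expected to simply
 * fold in the KSEG1 base (a fixed-offset alias, not a real mapping),
 * which is why iounmap() below has nothing to free for them.
 */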
void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512mb of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void __iomem *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
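
/*
 * A worked example of the alignment handling above, assuming 4KB pages
 * and an (illustrative) request for phys_addr = 0x44001234, size =
 * 0x100, which is outside the low 512MB and so takes the page-table
 * path:
 *
 *	last_addr = 0x44001333
 *	offset    = 0x234	(phys_addr & ~PAGE_MASK)
 *	phys_addr = 0x44001000	(page-aligned base)
 *	size      = 0x1000	(PAGE_ALIGN(0x44001334) - 0x44001000)
 *
 * One full page gets mapped, and the caller receives area->addr +
 * 0x234, so the non-page-aligned request "just works" without the
 * caller knowing, exactly as the NOTE above promises.
 */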
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)
void iounmap(volatile void __iomem *addr)
{
	/* KSEG1 aliases were never allocated, so only vfree real mappings. */
	if (!IS_KSEG1(addr))
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
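
#if 0
/*
 * A minimal usage sketch (not built). EXAMPLE_PHYS and EXAMPLE_SIZE are
 * hypothetical values, and ioremap() is assumed to be the asm/io.h
 * wrapper that calls __ioremap() with flags = 0. Callers must balance
 * every successful ioremap() with iounmap(), which is a no-op for
 * KSEG1 aliases and a vfree() for page-table mappings.
 */
#define EXAMPLE_PHYS	0x44000000UL	/* hypothetical register base */
#define EXAMPLE_SIZE	0x100UL		/* hypothetical region size */

static int example_use_ioremap(void)
{
	void __iomem *base = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);

	if (!base)
		return -ENOMEM;
	/* 32-bit MMIO read of the first register in the window */
	(void) readl(base);
	iounmap(base);
	return 0;
}
#endif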