/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 */
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
22 #include <asm/pgalloc.h>
23 #include <asm/tlbflush.h>
24 #include <linux/ioport.h>
25 #include <linux/bootmem.h>
26 #include <linux/proc_fs.h>
28 static void shmedia_mapioaddr(unsigned long, unsigned long);
29 static unsigned long shmedia_ioremap(struct resource
*, u32
, int);
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
44 void * __ioremap(unsigned long phys_addr
, unsigned long size
, unsigned long flags
)
47 struct vm_struct
* area
;
48 unsigned long offset
, last_addr
;
51 /* Don't allow wraparound or zero size */
52 last_addr
= phys_addr
+ size
- 1;
53 if (!size
|| last_addr
< phys_addr
)
56 pgprot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
|
57 _PAGE_WRITE
| _PAGE_DIRTY
|
58 _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
61 * Mappings have to be page-aligned
63 offset
= phys_addr
& ~PAGE_MASK
;
64 phys_addr
&= PAGE_MASK
;
65 size
= PAGE_ALIGN(last_addr
+ 1) - phys_addr
;
70 area
= get_vm_area(size
, VM_IOREMAP
);
71 pr_debug("Get vm_area returns %p addr %p\n",area
,area
->addr
);
74 area
->phys_addr
= phys_addr
;
76 if (ioremap_page_range((unsigned long)addr
, (unsigned long)addr
+ size
,
81 return (void *) (offset
+ (char *)addr
);
84 void iounmap(void *addr
)
86 struct vm_struct
*area
;
88 vfree((void *) (PAGE_MASK
& (unsigned long) addr
));
89 area
= remove_vm_area((void *) (PAGE_MASK
& (unsigned long) addr
));
91 printk(KERN_ERR
"iounmap: bad address %p\n", addr
);
98 static struct resource shmedia_iomap
= {
99 .name
= "shmedia_iomap",
100 .start
= IOBASE_VADDR
+ PAGE_SIZE
,
101 .end
= IOBASE_END
- 1,
104 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
);
105 static void shmedia_unmapioaddr(unsigned long vaddr
);
106 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
);
/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */
119 struct resource xres
; /* Must be first */
120 int xflag
; /* 1 == used */
124 static struct xresource xresv
[XNRES
];
126 static struct xresource
*xres_alloc(void)
128 struct xresource
*xrp
;
132 for (n
= 0; n
< XNRES
; n
++) {
133 if (xrp
->xflag
== 0) {
142 static void xres_free(struct xresource
*xrp
)
147 static struct resource
*shmedia_find_resource(struct resource
*root
,
150 struct resource
*res
;
152 for (res
= root
->child
; res
; res
= res
->sibling
)
153 if (res
->start
<= vaddr
&& res
->end
>= vaddr
)
159 static unsigned long shmedia_alloc_io(unsigned long phys
, unsigned long size
,
162 static int printed_full
= 0;
163 struct xresource
*xres
;
164 struct resource
*res
;
168 if (name
== NULL
) name
= "???";
170 if ((xres
= xres_alloc()) != 0) {
175 printk("%s: done with statics, switching to kmalloc\n",
180 tack
= kmalloc(sizeof (struct resource
) + tlen
+ 1, GFP_KERNEL
);
183 memset(tack
, 0, sizeof(struct resource
));
184 res
= (struct resource
*) tack
;
185 tack
+= sizeof (struct resource
);
188 strncpy(tack
, name
, XNMLN
);
192 return shmedia_ioremap(res
, phys
, size
);
195 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
)
197 unsigned long offset
= ((unsigned long) pa
) & (~PAGE_MASK
);
198 unsigned long round_sz
= (offset
+ sz
+ PAGE_SIZE
-1) & PAGE_MASK
;
202 if (allocate_resource(&shmedia_iomap
, res
, round_sz
,
203 shmedia_iomap
.start
, shmedia_iomap
.end
,
204 PAGE_SIZE
, NULL
, NULL
) != 0) {
205 panic("alloc_io_res(%s): cannot occupy\n",
206 (res
->name
!= NULL
)? res
->name
: "???");
212 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
214 /* log at boot time ... */
215 printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
216 ((res
->name
!= NULL
) ? res
->name
: "???"),
217 psz
, psz
== 1 ? " " : "s", va
, pa
);
219 for (psz
= res
->end
- res
->start
+ 1; psz
!= 0; psz
-= PAGE_SIZE
) {
220 shmedia_mapioaddr(pa
, va
);
225 res
->start
+= offset
;
226 res
->end
= res
->start
+ sz
- 1; /* not strictly necessary.. */
231 static void shmedia_free_io(struct resource
*res
)
233 unsigned long len
= res
->end
- res
->start
+ 1;
235 BUG_ON((len
& (PAGE_SIZE
- 1)) != 0);
239 shmedia_unmapioaddr(res
->start
+ len
);
242 release_resource(res
);
245 static void *sh64_get_page(void)
247 extern int after_bootmem
;
251 page
= (void *)get_zeroed_page(GFP_ATOMIC
);
253 page
= alloc_bootmem_pages(PAGE_SIZE
);
256 if (!page
|| ((unsigned long)page
& ~PAGE_MASK
))
257 panic("sh64_get_page: Out of memory already?\n");
262 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
)
268 unsigned long flags
= 1; /* 1 = CB0-1 device */
270 pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa
, va
);
272 pgdp
= pgd_offset_k(va
);
273 if (pgd_none(*pgdp
) || !pgd_present(*pgdp
)) {
274 pmdp
= (pmd_t
*)sh64_get_page();
275 set_pgd(pgdp
, __pgd((unsigned long)pmdp
| _KERNPG_TABLE
));
278 pmdp
= pmd_offset(pgdp
, va
);
279 if (pmd_none(*pmdp
) || !pmd_present(*pmdp
) ) {
280 ptep
= (pte_t
*)sh64_get_page();
281 set_pmd(pmdp
, __pmd((unsigned long)ptep
+ _PAGE_TABLE
));
284 prot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
| _PAGE_WRITE
|
285 _PAGE_DIRTY
| _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
287 pte
= pfn_pte(pa
>> PAGE_SHIFT
, prot
);
288 ptep
= pte_offset_kernel(pmdp
, va
);
290 if (!pte_none(*ptep
) &&
291 pte_val(*ptep
) != pte_val(pte
))
296 flush_tlb_kernel_range(va
, PAGE_SIZE
);
299 static void shmedia_unmapioaddr(unsigned long vaddr
)
305 pgdp
= pgd_offset_k(vaddr
);
306 pmdp
= pmd_offset(pgdp
, vaddr
);
308 if (pmd_none(*pmdp
) || pmd_bad(*pmdp
))
311 ptep
= pte_offset_kernel(pmdp
, vaddr
);
313 if (pte_none(*ptep
) || !pte_present(*ptep
))
316 clear_page((void *)ptep
);
317 pte_clear(&init_mm
, vaddr
, ptep
);
320 unsigned long onchip_remap(unsigned long phys
, unsigned long size
, const char *name
)
322 if (size
< PAGE_SIZE
)
325 return shmedia_alloc_io(phys
, size
, name
);
328 void onchip_unmap(unsigned long vaddr
)
330 struct resource
*res
;
333 res
= shmedia_find_resource(&shmedia_iomap
, vaddr
);
335 printk(KERN_ERR
"%s: Failed to free 0x%08lx\n",
336 __FUNCTION__
, vaddr
);
340 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
342 printk(KERN_DEBUG
"unmapioaddr: %6s [%2d page%s] freed\n",
343 res
->name
, psz
, psz
== 1 ? " " : "s");
345 shmedia_free_io(res
);
347 if ((char *)res
>= (char *)xresv
&&
348 (char *)res
< (char *)&xresv
[XNRES
]) {
349 xres_free((struct xresource
*)res
);
#ifdef CONFIG_PROC_FS
/*
 * /proc read callback: dump each child range of the shmedia_iomap tree
 * (passed in via 'data') as "start-end: name", one per line, stopping
 * when fewer than ~32 bytes of the caller's buffer remain.
 *
 * NOTE(review): missing statements restored from upstream; verify.
 */
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n",
			     (unsigned long)r->start,
			     (unsigned long)r->end, nm);
	}

	return p - buf;
}
#endif /* CONFIG_PROC_FS */
377 static int __init
register_proc_onchip(void)
379 #ifdef CONFIG_PROC_FS
380 create_proc_read_entry("io_map",0,0, ioremap_proc_info
, &shmedia_iomap
);
385 __initcall(register_proc_onchip
);