/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 */
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
22 #include <linux/ioport.h>
23 #include <linux/bootmem.h>
24 #include <linux/proc_fs.h>
25 #include <linux/module.h>
26 #include <asm/pgalloc.h>
27 #include <asm/tlbflush.h>
29 static void shmedia_mapioaddr(unsigned long, unsigned long);
30 static unsigned long shmedia_ioremap(struct resource
*, u32
, int);
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
45 void * __ioremap(unsigned long phys_addr
, unsigned long size
, unsigned long flags
)
48 struct vm_struct
* area
;
49 unsigned long offset
, last_addr
;
52 /* Don't allow wraparound or zero size */
53 last_addr
= phys_addr
+ size
- 1;
54 if (!size
|| last_addr
< phys_addr
)
57 pgprot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
|
58 _PAGE_WRITE
| _PAGE_DIRTY
|
59 _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
62 * Mappings have to be page-aligned
64 offset
= phys_addr
& ~PAGE_MASK
;
65 phys_addr
&= PAGE_MASK
;
66 size
= PAGE_ALIGN(last_addr
+ 1) - phys_addr
;
71 area
= get_vm_area(size
, VM_IOREMAP
);
72 pr_debug("Get vm_area returns %p addr %p\n",area
,area
->addr
);
75 area
->phys_addr
= phys_addr
;
77 if (ioremap_page_range((unsigned long)addr
, (unsigned long)addr
+ size
,
82 return (void *) (offset
+ (char *)addr
);
84 EXPORT_SYMBOL(__ioremap
);
86 void iounmap(void *addr
)
88 struct vm_struct
*area
;
90 vfree((void *) (PAGE_MASK
& (unsigned long) addr
));
91 area
= remove_vm_area((void *) (PAGE_MASK
& (unsigned long) addr
));
93 printk(KERN_ERR
"iounmap: bad address %p\n", addr
);
99 EXPORT_SYMBOL(iounmap
);
101 static struct resource shmedia_iomap
= {
102 .name
= "shmedia_iomap",
103 .start
= IOBASE_VADDR
+ PAGE_SIZE
,
104 .end
= IOBASE_END
- 1,
107 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
);
108 static void shmedia_unmapioaddr(unsigned long vaddr
);
109 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
);
/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */
122 struct resource xres
; /* Must be first */
123 int xflag
; /* 1 == used */
127 static struct xresource xresv
[XNRES
];
129 static struct xresource
*xres_alloc(void)
131 struct xresource
*xrp
;
135 for (n
= 0; n
< XNRES
; n
++) {
136 if (xrp
->xflag
== 0) {
145 static void xres_free(struct xresource
*xrp
)
150 static struct resource
*shmedia_find_resource(struct resource
*root
,
153 struct resource
*res
;
155 for (res
= root
->child
; res
; res
= res
->sibling
)
156 if (res
->start
<= vaddr
&& res
->end
>= vaddr
)
162 static unsigned long shmedia_alloc_io(unsigned long phys
, unsigned long size
,
165 static int printed_full
= 0;
166 struct xresource
*xres
;
167 struct resource
*res
;
171 if (name
== NULL
) name
= "???";
173 if ((xres
= xres_alloc()) != 0) {
178 printk("%s: done with statics, switching to kmalloc\n",
183 tack
= kmalloc(sizeof (struct resource
) + tlen
+ 1, GFP_KERNEL
);
186 memset(tack
, 0, sizeof(struct resource
));
187 res
= (struct resource
*) tack
;
188 tack
+= sizeof (struct resource
);
191 strncpy(tack
, name
, XNMLN
);
195 return shmedia_ioremap(res
, phys
, size
);
198 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
)
200 unsigned long offset
= ((unsigned long) pa
) & (~PAGE_MASK
);
201 unsigned long round_sz
= (offset
+ sz
+ PAGE_SIZE
-1) & PAGE_MASK
;
205 if (allocate_resource(&shmedia_iomap
, res
, round_sz
,
206 shmedia_iomap
.start
, shmedia_iomap
.end
,
207 PAGE_SIZE
, NULL
, NULL
) != 0) {
208 panic("alloc_io_res(%s): cannot occupy\n",
209 (res
->name
!= NULL
)? res
->name
: "???");
215 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
217 /* log at boot time ... */
218 printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
219 ((res
->name
!= NULL
) ? res
->name
: "???"),
220 psz
, psz
== 1 ? " " : "s", va
, pa
);
222 for (psz
= res
->end
- res
->start
+ 1; psz
!= 0; psz
-= PAGE_SIZE
) {
223 shmedia_mapioaddr(pa
, va
);
228 res
->start
+= offset
;
229 res
->end
= res
->start
+ sz
- 1; /* not strictly necessary.. */
234 static void shmedia_free_io(struct resource
*res
)
236 unsigned long len
= res
->end
- res
->start
+ 1;
238 BUG_ON((len
& (PAGE_SIZE
- 1)) != 0);
242 shmedia_unmapioaddr(res
->start
+ len
);
245 release_resource(res
);
248 static __init_refok
void *sh64_get_page(void)
250 extern int after_bootmem
;
254 page
= (void *)get_zeroed_page(GFP_ATOMIC
);
256 page
= alloc_bootmem_pages(PAGE_SIZE
);
259 if (!page
|| ((unsigned long)page
& ~PAGE_MASK
))
260 panic("sh64_get_page: Out of memory already?\n");
265 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
)
271 unsigned long flags
= 1; /* 1 = CB0-1 device */
273 pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa
, va
);
275 pgdp
= pgd_offset_k(va
);
276 if (pgd_none(*pgdp
) || !pgd_present(*pgdp
)) {
277 pmdp
= (pmd_t
*)sh64_get_page();
278 set_pgd(pgdp
, __pgd((unsigned long)pmdp
| _KERNPG_TABLE
));
281 pmdp
= pmd_offset(pgdp
, va
);
282 if (pmd_none(*pmdp
) || !pmd_present(*pmdp
) ) {
283 ptep
= (pte_t
*)sh64_get_page();
284 set_pmd(pmdp
, __pmd((unsigned long)ptep
+ _PAGE_TABLE
));
287 prot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
| _PAGE_WRITE
|
288 _PAGE_DIRTY
| _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
290 pte
= pfn_pte(pa
>> PAGE_SHIFT
, prot
);
291 ptep
= pte_offset_kernel(pmdp
, va
);
293 if (!pte_none(*ptep
) &&
294 pte_val(*ptep
) != pte_val(pte
))
299 flush_tlb_kernel_range(va
, PAGE_SIZE
);
302 static void shmedia_unmapioaddr(unsigned long vaddr
)
308 pgdp
= pgd_offset_k(vaddr
);
309 pmdp
= pmd_offset(pgdp
, vaddr
);
311 if (pmd_none(*pmdp
) || pmd_bad(*pmdp
))
314 ptep
= pte_offset_kernel(pmdp
, vaddr
);
316 if (pte_none(*ptep
) || !pte_present(*ptep
))
319 clear_page((void *)ptep
);
320 pte_clear(&init_mm
, vaddr
, ptep
);
323 unsigned long onchip_remap(unsigned long phys
, unsigned long size
, const char *name
)
325 if (size
< PAGE_SIZE
)
328 return shmedia_alloc_io(phys
, size
, name
);
331 void onchip_unmap(unsigned long vaddr
)
333 struct resource
*res
;
336 res
= shmedia_find_resource(&shmedia_iomap
, vaddr
);
338 printk(KERN_ERR
"%s: Failed to free 0x%08lx\n",
339 __FUNCTION__
, vaddr
);
343 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
345 printk(KERN_DEBUG
"unmapioaddr: %6s [%2d page%s] freed\n",
346 res
->name
, psz
, psz
== 1 ? " " : "s");
348 shmedia_free_io(res
);
350 if ((char *)res
>= (char *)xresv
&&
351 (char *)res
< (char *)&xresv
[XNRES
]) {
352 xres_free((struct xresource
*)res
);
#ifdef CONFIG_PROC_FS
/*
 * ioremap_proc_info - /proc read callback listing on-chip mappings.
 *
 * @data is the root resource (shmedia_iomap); one "start-end: name" line
 * is emitted per child.  Stops early when fewer than 32 bytes remain in
 * the caller's buffer.  Returns the number of bytes written.
 */
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n",
			     (unsigned long)r->start,
			     (unsigned long)r->end, nm);
	}

	return p - buf;
}
#endif /* CONFIG_PROC_FS */
380 static int __init
register_proc_onchip(void)
382 #ifdef CONFIG_PROC_FS
383 create_proc_read_entry("io_map",0,0, ioremap_proc_info
, &shmedia_iomap
);
388 __initcall(register_proc_onchip
);