/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

static void shmedia_mapioaddr(unsigned long, unsigned long);
static unsigned long shmedia_ioremap(struct resource *, u32, int);

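/*
 * The remap_area_{pte,pmd,pages}() helpers below walk the kernel page
 * tables (allocating intermediate levels as needed) and install PTEs
 * covering the requested physical range with the given protection flags.
 * They are only used by __ioremap() further down in this file.
 */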
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
				   _PAGE_WRITE    | _PAGE_DIRTY  |
				   _PAGE_ACCESSED | _PAGE_SHARED | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();

	pfn = phys_addr >> PAGE_SHIFT;

	pr_debug("    %s: pte %p address %lx size %lx phys_addr %lx\n",
		 __FUNCTION__, pte, address, size, phys_addr);

	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}

		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;

	if (address >= end)
		BUG();

	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();

	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			break;
		}
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
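/*
 * Illustrative example (hypothetical values, assuming 4 KiB pages):
 * __ioremap(0x18000123, 0x10, flags) computes offset = 0x123, rounds
 * phys_addr down to 0x18000000 and size up to one page, maps that single
 * page, and returns area->addr + 0x123.
 */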
void * __ioremap(unsigned long phys_addr, unsigned long size,
		 unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}

void iounmap(void *addr)
{
	struct vm_struct *area;

	vfree((void *) (PAGE_MASK & (unsigned long) addr));
	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
	if (!area) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(area);
}

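/*
 * Resource tree for the on-chip (SHmedia) I/O window: allocations made by
 * shmedia_alloc_io()/shmedia_ioremap() below are carved out of the virtual
 * range IOBASE_VADDR + PAGE_SIZE .. IOBASE_END - 1.
 */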
static struct resource shmedia_iomap = {
	.name	= "shmedia_iomap",
	.start	= IOBASE_VADDR + PAGE_SIZE,
	.end	= IOBASE_END - 1,
};

static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);

/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

/* Sizes as in the SPARC mini-allocator this code follows. */
#define XNMLN  15
#define XNRES  10

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

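/*
 * The first XNRES on-chip mappings are tracked in the static xresv[] slots
 * above, so they can be set up before kmalloc() works; once the slots are
 * exhausted, shmedia_alloc_io() falls back to kmalloc().
 */
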
static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

static struct resource *shmedia_find_resource(struct resource *root,
					      unsigned long vaddr)
{
	struct resource *res;

	for (res = root->child; res; res = res->sibling)
		if (res->start <= vaddr && res->end >= vaddr)
			return res;

	return NULL;
}

static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name)
{
	static int printed_full = 0;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("%s: done with statics, switching to kmalloc\n",
			       __FUNCTION__);
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (!tack)
			return 0;	/* out of memory, no mapping */
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size);
}

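/*
 * Claim a page-aligned chunk of the shmedia_iomap window for 'res', map
 * the physical range pa .. pa + sz - 1 into it page by page, and return
 * the virtual address corresponding to pa (including the sub-page offset).
 */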
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		      (res->name != NULL) ? res->name : "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	res->start += offset;
	res->end = res->start + sz - 1;	/* not strictly necessary.. */

	return res->start;
}

static void shmedia_free_io(struct resource *res)
{
	unsigned long len = res->end - res->start + 1;

	BUG_ON((len & (PAGE_SIZE - 1)) != 0);

	while (len) {
		len -= PAGE_SIZE;
		shmedia_unmapioaddr(res->start + len);
	}

	release_resource(res);
}

static void *sh64_get_page(void)
{
	extern int after_bootmem;
	void *page;

	if (after_bootmem)
		page = (void *)get_zeroed_page(GFP_ATOMIC);
	else
		page = alloc_bootmem_pages(PAGE_SIZE);

	if (!page || ((unsigned long)page & ~PAGE_MASK))
		panic("sh64_get_page: Out of memory already?\n");

	return page;
}

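/*
 * Install a single page mapping at 'va' for physical address 'pa',
 * building any missing pmd/pte levels with sh64_get_page().  The flags
 * value of 1 selects the device ("CB0-1") cache behaviour noted in the
 * comment below.
 */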
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);

	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	pte_clear(&init_mm, vaddr, ptep);
}

unsigned long onchip_remap(unsigned long phys, unsigned long size,
			   const char *name)
{
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return shmedia_alloc_io(phys, size, name);
}

void onchip_unmap(unsigned long vaddr)
{
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
	       res->name, psz, psz == 1 ? " " : "s");

	shmedia_free_io(res);

	if ((char *)res >= (char *)xresv &&
	    (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}

#ifdef CONFIG_PROC_FS
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p - buf;
}
#endif /* CONFIG_PROC_FS */

static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
			       &shmedia_iomap);
#endif
	return 0;
}

__initcall(register_proc_onchip);