/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
28 static void shmedia_mapioaddr(unsigned long, unsigned long);
29 static unsigned long shmedia_ioremap(struct resource
*, u32
, int);
31 static inline void remap_area_pte(pte_t
* pte
, unsigned long address
, unsigned long size
,
32 unsigned long phys_addr
, unsigned long flags
)
36 pgprot_t pgprot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
|
37 _PAGE_WRITE
| _PAGE_DIRTY
|
38 _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
47 pfn
= phys_addr
>> PAGE_SHIFT
;
49 pr_debug(" %s: pte %p address %lx size %lx phys_addr %lx\n",
50 __FUNCTION__
,pte
,address
,size
,phys_addr
);
53 if (!pte_none(*pte
)) {
54 printk("remap_area_pte: page already exists\n");
58 set_pte(pte
, pfn_pte(pfn
, pgprot
));
62 } while (address
&& (address
< end
));
65 static inline int remap_area_pmd(pmd_t
* pmd
, unsigned long address
, unsigned long size
,
66 unsigned long phys_addr
, unsigned long flags
)
70 address
&= ~PGDIR_MASK
;
82 pte_t
* pte
= pte_alloc_kernel(&init_mm
, pmd
, address
);
85 remap_area_pte(pte
, address
, end
- address
, address
+ phys_addr
, flags
);
86 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
88 } while (address
&& (address
< end
));
92 static int remap_area_pages(unsigned long address
, unsigned long phys_addr
,
93 unsigned long size
, unsigned long flags
)
97 unsigned long end
= address
+ size
;
100 dir
= pgd_offset_k(address
);
104 spin_lock(&init_mm
.page_table_lock
);
106 pmd_t
*pmd
= pmd_alloc(&init_mm
, dir
, address
);
110 if (remap_area_pmd(pmd
, address
, end
- address
,
111 phys_addr
+ address
, flags
)) {
115 address
= (address
+ PGDIR_SIZE
) & PGDIR_MASK
;
117 } while (address
&& (address
< end
));
118 spin_unlock(&init_mm
.page_table_lock
);
124 * Generic mapping function (not visible outside):
128 * Remap an arbitrary physical address space into the kernel virtual
129 * address space. Needed when the kernel wants to access high addresses
132 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
133 * have to convert them into an offset in a page-aligned mapping, but the
134 * caller shouldn't need to know that small detail.
136 void * __ioremap(unsigned long phys_addr
, unsigned long size
, unsigned long flags
)
139 struct vm_struct
* area
;
140 unsigned long offset
, last_addr
;
142 /* Don't allow wraparound or zero size */
143 last_addr
= phys_addr
+ size
- 1;
144 if (!size
|| last_addr
< phys_addr
)
148 * Mappings have to be page-aligned
150 offset
= phys_addr
& ~PAGE_MASK
;
151 phys_addr
&= PAGE_MASK
;
152 size
= PAGE_ALIGN(last_addr
+ 1) - phys_addr
;
157 area
= get_vm_area(size
, VM_IOREMAP
);
158 pr_debug("Get vm_area returns %p addr %p\n",area
,area
->addr
);
161 area
->phys_addr
= phys_addr
;
163 if (remap_area_pages((unsigned long)addr
, phys_addr
, size
, flags
)) {
167 return (void *) (offset
+ (char *)addr
);
170 void iounmap(void *addr
)
172 struct vm_struct
*area
;
174 vfree((void *) (PAGE_MASK
& (unsigned long) addr
));
175 area
= remove_vm_area((void *) (PAGE_MASK
& (unsigned long) addr
));
177 printk(KERN_ERR
"iounmap: bad address %p\n", addr
);
184 static struct resource shmedia_iomap
= {
185 .name
= "shmedia_iomap",
186 .start
= IOBASE_VADDR
+ PAGE_SIZE
,
187 .end
= IOBASE_END
- 1,
190 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
);
191 static void shmedia_unmapioaddr(unsigned long vaddr
);
192 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
);
195 * We have the same problem as the SPARC, so lets have the same comment:
196 * Our mini-allocator...
197 * Boy this is gross! We need it because we must map I/O for
198 * timers and interrupt controller before the kmalloc is available.
205 struct resource xres
; /* Must be first */
206 int xflag
; /* 1 == used */
210 static struct xresource xresv
[XNRES
];
212 static struct xresource
*xres_alloc(void)
214 struct xresource
*xrp
;
218 for (n
= 0; n
< XNRES
; n
++) {
219 if (xrp
->xflag
== 0) {
228 static void xres_free(struct xresource
*xrp
)
233 static struct resource
*shmedia_find_resource(struct resource
*root
,
236 struct resource
*res
;
238 for (res
= root
->child
; res
; res
= res
->sibling
)
239 if (res
->start
<= vaddr
&& res
->end
>= vaddr
)
245 static unsigned long shmedia_alloc_io(unsigned long phys
, unsigned long size
,
248 static int printed_full
= 0;
249 struct xresource
*xres
;
250 struct resource
*res
;
254 if (name
== NULL
) name
= "???";
256 if ((xres
= xres_alloc()) != 0) {
261 printk("%s: done with statics, switching to kmalloc\n",
266 tack
= kmalloc(sizeof (struct resource
) + tlen
+ 1, GFP_KERNEL
);
269 memset(tack
, 0, sizeof(struct resource
));
270 res
= (struct resource
*) tack
;
271 tack
+= sizeof (struct resource
);
274 strncpy(tack
, name
, XNMLN
);
278 return shmedia_ioremap(res
, phys
, size
);
281 static unsigned long shmedia_ioremap(struct resource
*res
, u32 pa
, int sz
)
283 unsigned long offset
= ((unsigned long) pa
) & (~PAGE_MASK
);
284 unsigned long round_sz
= (offset
+ sz
+ PAGE_SIZE
-1) & PAGE_MASK
;
288 if (allocate_resource(&shmedia_iomap
, res
, round_sz
,
289 shmedia_iomap
.start
, shmedia_iomap
.end
,
290 PAGE_SIZE
, NULL
, NULL
) != 0) {
291 panic("alloc_io_res(%s): cannot occupy\n",
292 (res
->name
!= NULL
)? res
->name
: "???");
298 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
300 /* log at boot time ... */
301 printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
302 ((res
->name
!= NULL
) ? res
->name
: "???"),
303 psz
, psz
== 1 ? " " : "s", va
, pa
);
305 for (psz
= res
->end
- res
->start
+ 1; psz
!= 0; psz
-= PAGE_SIZE
) {
306 shmedia_mapioaddr(pa
, va
);
311 res
->start
+= offset
;
312 res
->end
= res
->start
+ sz
- 1; /* not strictly necessary.. */
317 static void shmedia_free_io(struct resource
*res
)
319 unsigned long len
= res
->end
- res
->start
+ 1;
321 BUG_ON((len
& (PAGE_SIZE
- 1)) != 0);
325 shmedia_unmapioaddr(res
->start
+ len
);
328 release_resource(res
);
331 static void *sh64_get_page(void)
333 extern int after_bootmem
;
337 page
= (void *)get_zeroed_page(GFP_ATOMIC
);
339 page
= alloc_bootmem_pages(PAGE_SIZE
);
342 if (!page
|| ((unsigned long)page
& ~PAGE_MASK
))
343 panic("sh64_get_page: Out of memory already?\n");
348 static void shmedia_mapioaddr(unsigned long pa
, unsigned long va
)
354 unsigned long flags
= 1; /* 1 = CB0-1 device */
356 pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa
, va
);
358 pgdp
= pgd_offset_k(va
);
359 if (pgd_none(*pgdp
) || !pgd_present(*pgdp
)) {
360 pmdp
= (pmd_t
*)sh64_get_page();
361 set_pgd(pgdp
, __pgd((unsigned long)pmdp
| _KERNPG_TABLE
));
364 pmdp
= pmd_offset(pgdp
, va
);
365 if (pmd_none(*pmdp
) || !pmd_present(*pmdp
) ) {
366 ptep
= (pte_t
*)sh64_get_page();
367 set_pmd(pmdp
, __pmd((unsigned long)ptep
+ _PAGE_TABLE
));
370 prot
= __pgprot(_PAGE_PRESENT
| _PAGE_READ
| _PAGE_WRITE
|
371 _PAGE_DIRTY
| _PAGE_ACCESSED
| _PAGE_SHARED
| flags
);
373 pte
= pfn_pte(pa
>> PAGE_SHIFT
, prot
);
374 ptep
= pte_offset_kernel(pmdp
, va
);
376 if (!pte_none(*ptep
) &&
377 pte_val(*ptep
) != pte_val(pte
))
382 flush_tlb_kernel_range(va
, PAGE_SIZE
);
385 static void shmedia_unmapioaddr(unsigned long vaddr
)
391 pgdp
= pgd_offset_k(vaddr
);
392 pmdp
= pmd_offset(pgdp
, vaddr
);
394 if (pmd_none(*pmdp
) || pmd_bad(*pmdp
))
397 ptep
= pte_offset_kernel(pmdp
, vaddr
);
399 if (pte_none(*ptep
) || !pte_present(*ptep
))
402 clear_page((void *)ptep
);
403 pte_clear(&init_mm
, vaddr
, ptep
);
406 unsigned long onchip_remap(unsigned long phys
, unsigned long size
, const char *name
)
408 if (size
< PAGE_SIZE
)
411 return shmedia_alloc_io(phys
, size
, name
);
414 void onchip_unmap(unsigned long vaddr
)
416 struct resource
*res
;
419 res
= shmedia_find_resource(&shmedia_iomap
, vaddr
);
421 printk(KERN_ERR
"%s: Failed to free 0x%08lx\n",
422 __FUNCTION__
, vaddr
);
426 psz
= (res
->end
- res
->start
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
428 printk(KERN_DEBUG
"unmapioaddr: %6s [%2d page%s] freed\n",
429 res
->name
, psz
, psz
== 1 ? " " : "s");
431 shmedia_free_io(res
);
433 if ((char *)res
>= (char *)xresv
&&
434 (char *)res
< (char *)&xresv
[XNRES
]) {
435 xres_free((struct xresource
*)res
);
#ifdef CONFIG_PROC_FS
/*
 * /proc read callback: list every child of the resource passed as @data
 * (the shmedia_iomap tree) as "start-end: name" lines into @buf.
 */
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p-buf;
}
#endif /* CONFIG_PROC_FS */
461 static int __init
register_proc_onchip(void)
463 #ifdef CONFIG_PROC_FS
464 create_proc_read_entry("io_map",0,0, ioremap_proc_info
, &shmedia_iomap
);
469 __initcall(register_proc_onchip
);