// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG
/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE
static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}
static inline void free_io_area(void *addr)
{
	/* vfree() wants the page-aligned base, not the offset address */
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else
#define IO_SIZE		PMD_SIZE
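/*
 * iolist tracks the virtual ranges handed out between KMAP_START and
 * KMAP_END, kept sorted by address; each entry also reserves an extra
 * IO_SIZE gap after itself (see get_io_area() below).
 */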
static struct vm_struct *iolist;
/*
 * __free_io_area unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;
	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			/* an early termination descriptor maps PMD_SIZE at once */
			if (pmd_type == _PAGE_PRESENT) {
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif
		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
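
/*
 * First-fit allocator for the KMAP_START..KMAP_END virtual window: walk
 * the sorted iolist and place the new area in the first hole that is
 * large enough.
 */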
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END - size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;	/* reserve an IO_SIZE gap after the area */
	area->next = *p;
	*p = area;
	return area;
}
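
/*
 * get_io_area() reserves IO_SIZE bytes beyond the requested size; when
 * freeing, tmp->size - IO_SIZE is passed down so only the real mapping
 * is torn down.
 */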
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif
/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;
	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT) {
		if (physaddr >= 0xff000000 && cacheflag == IOMAP_NOCACHE_SER)
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *)physaddr;
#endif
#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;
	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif
	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
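	/*
	 * Note: on 020/030 the only per-page cache control is the
	 * cache-inhibit bit (_PAGE_NOCACHE030), so both cached modes fall
	 * through without adding any cache flags.
	 */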
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE - 1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}
#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			/* one early termination descriptor maps a whole PMD_SIZE chunk */
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}
			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();
	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
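
/*
 * Typical use from a driver (illustrative sketch only; BOARD_MMIO_BASE is
 * a made-up constant, not part of this file):
 *
 *	void __iomem *regs = __ioremap(BOARD_MMIO_BASE, 0x1000,
 *				       IOMAP_NOCACHE_SER);
 *	if (regs) {
 *		...
 *		iounmap(regs);
 *	}
 */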
/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	/* addresses in these windows were returned 1:1 by __ioremap(), never mapped */
	if (MACH_IS_AMIGA &&
	    ((unsigned long)addr >= 0x40000000) &&
	    ((unsigned long)addr < 0x60000000))
		return;
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT && (unsigned long)addr >= 0xff000000)
		return;
#endif
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
}
EXPORT_SYMBOL(iounmap);
/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;
	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}
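	/*
	 * From here on cmode no longer holds an IOMAP_* value but the raw
	 * cache bits to splice into each descriptor.
	 */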
	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif
		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);