// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
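
/*
 * Note: an early termination descriptor at the pointer-table level maps a
 * whole PTRTREESIZE (256 KiB) chunk with a single entry, which is why the
 * 020/030 allocator below works in 256 KiB units.
 */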

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)
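
/*
 * Address-sorted list of the I/O areas handed out on 020/030; new areas
 * are linked into place by get_io_area() below.
 */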
static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
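
			/*
			 * Each pmd_t here bundles 16 pointer-table slots
			 * (hence the "& 15" above), so one early termination
			 * entry covers a full PTRTREESIZE chunk and can be
			 * torn down in a single store.
			 */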
			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
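
/*
 * Carve a region out of the KMAP window: walk the address-sorted iolist
 * and take the first gap big enough (first fit). Each area is padded with
 * one unmapped IO_SIZE guard block, removed again in free_io_area().
 */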
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	addr = KMAP_START;
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
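
/*
 * Unlink an area from the iolist and unmap it, minus the guard gap that
 * get_io_area() appended to its size.
 */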
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;

	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;
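
	/*
	 * Some physical windows never need a fresh mapping: presumably they
	 * are already reachable 1:1 with the right cache mode, so the
	 * physical address can be handed back directly.
	 */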
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif
192 printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr
, size
, cacheflag
);

	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
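
	/*
	 * Worked example (hypothetical values, assuming the 020/030 IO_SIZE
	 * of 256 KiB): physaddr = 0x40010000, size = 0x1000 gives
	 * offset = 0x10000, physaddr = 0x40000000 and size rounded up
	 * to 0x40000.
	 */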

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
211 printk("0x%lx,0x%lx,0x%lx", physaddr
, virtaddr
, retaddr
);

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
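
	/*
	 * Now fill in the page tables: on 020/030 one early termination
	 * descriptor per PTRTREESIZE chunk, otherwise one pte per page.
	 */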
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
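
/*
 * Typical use from a driver (hypothetical base address and register
 * offset, shown for illustration only):
 *
 *	void __iomem *regs = __ioremap(0x00d80000, 0x4000, IOMAP_NOCACHE_SER);
 *	if (regs) {
 *		writew(0x1234, regs + 0x10);
 *		iounmap(regs);
 *	}
 */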

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
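
/*
 * Example (hypothetical fb_base/fb_size): switch an already mapped frame
 * buffer to writethrough caching, after pushing any dirty data for the
 * range as required above:
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 */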