// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
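
/*
 * Concretely, the two branches below differ in granularity: the 040/060 path
 * reuses the generic vmalloc-area allocator and maps in PAGE_SIZE steps,
 * while the 020/030 path manages its own list of areas (iolist) in PMD_SIZE
 * chunks so that each chunk can be described by a single early termination
 * descriptor.  Hence the two different IO_SIZE definitions.
 */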

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				/* early termination descriptor: drop a whole PMD_SIZE chunk */
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
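
/*
 * get_io_area() below is a simple first-fit allocator over the kernel I/O
 * mapping window: iolist is kept sorted by address and the new area is
 * placed in the first hole between KMAP_START and KMAP_END that is large
 * enough.  Each area reserves an extra IO_SIZE of address space as a gap
 * after the mapping (see free_io_area()).
 */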
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	addr = KMAP_START;
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END - size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
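
/*
 * Unlink the matching area from iolist and unmap it.  tmp->size still
 * includes the IO_SIZE gap reserved by get_io_area(); that gap was never
 * mapped, so it is subtracted again before calling __free_io_area().
 */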
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;

	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif

	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
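	/*
	 * Worked example (illustrative values only, assuming IO_SIZE == 0x1000,
	 * i.e. the 040/060 case with 4 KiB pages; on 020/030 the same
	 * arithmetic applies at PMD_SIZE granularity):
	 *   physaddr = 0x40001234, size = 0xe00
	 *   offset   = 0x40001234 & 0xfff           = 0x234
	 *   physaddr = 0x40001234 & ~0xfff          = 0x40001000
	 *   size     = (0xe00 + 0x234 + 0xfff) & ~0xfff = 0x2000
	 * so two pages get mapped and the caller is handed virtaddr + 0x234.
	 */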

	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
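
	/*
	 * Now fill in the page tables: on 020/030 each PMD_SIZE chunk is
	 * covered by a single early termination descriptor written directly
	 * into the pmd, otherwise individual PAGE_SIZE ptes are allocated
	 * and filled with the (flagged) physical address.
	 */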
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE - 1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}

	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
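
/*
 * Usage sketch (illustrative only; the base address, length and register
 * offset below are made-up values, not a real device, and ioremap() is
 * assumed to be the arch wrapper that calls __ioremap(..., IOMAP_NOCACHE_SER)):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(0xdeadb000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 */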

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				/* early termination descriptor: change the whole chunk's mode */
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
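
/*
 * Usage sketch (illustrative only; the pointer and length are made up):
 *
 *	// switch an already mapped 64 KiB region to write-through caching
 *	kernel_set_cachemode(membase, 0x10000, IOMAP_WRITETHROUGH);
 *
 * The caller must make sure no stale data for that range is left in the
 * caches before changing the mode (see the comment above the function).
 */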