// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;
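
/*
 * "nohugeiomap" on the kernel command line disables huge-page ioremap
 * even when the architecture advertises support for it.
 */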
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
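
/*
 * Map the range [addr, end) at the PTE level, pointing it at phys_addr
 * with the given protection bits and recording the change in *mask.
 */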
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
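
/*
 * Try to cover [addr, end) with a single huge PMD entry.  This only
 * succeeds when the range spans exactly one PMD, both the virtual and
 * physical addresses are PMD-aligned, and any existing page-table page
 * under the PMD can be freed; otherwise the caller falls back to PTEs.
 */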
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
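
/*
 * Walk the range at the PMD level, using huge PMD mappings where
 * possible and descending to ioremap_pte_range() otherwise.
 */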
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
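
/* As ioremap_try_huge_pmd(), but for a huge PUD entry. */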
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}
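
/*
 * Walk the range at the PUD level, using huge PUD mappings where
 * possible and descending to ioremap_pmd_range() otherwise.
 */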
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
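
/* As ioremap_try_huge_pmd(), but for a huge P4D entry. */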
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}
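
/*
 * Walk the range at the P4D level, using huge P4D mappings where
 * possible and descending to ioremap_pud_range() otherwise.
 */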
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
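
/*
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr:	start of the virtual range
 * @end:	end of the virtual range
 * @phys_addr:	physical address to map to
 * @prot:	page protection flags
 *
 * Walks the kernel page tables from the PGD down, flushes the cache over
 * the new mapping and, if any levels covered by ARCH_PAGE_TABLE_SYNC_MASK
 * were modified, synchronizes the kernel mappings.  Returns 0 on success
 * or a negative errno.
 */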
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
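
/*
 * Generic ioremap implementation: reserve a VM_IOREMAP area in vmalloc
 * space and map the (page-aligned) physical range into it with the
 * requested protection bits.
 */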
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
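
/* Undo a mapping created by the generic ioremap_prot(). */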
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif	/* CONFIG_GENERIC_IOREMAP */