// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fixmap manipulation code
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/* ensure that the fixmap region does not grow down into the PCI I/O region */
static_assert(FIXADDR_TOT_START > PCI_IO_END);
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

static_assert(NR_BM_PMD_TABLES == 1);
#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)
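
/*
 * Illustrative note (added, not in the original source): __BM_TABLE_IDX()
 * gives the distance, in naturally aligned (1 << shift)-sized blocks,
 * between addr and FIXADDR_TOT_START. With BM_PTE_TABLE_IDX(), an address
 * in the first PMD-sized block of the fixmap therefore gets index 0, an
 * address one PMD-sized block higher gets index 1, and so on, selecting
 * the matching bm_pte[] sub-table below.
 */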
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
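
/*
 * Descriptive note (added, not in the original source): the bm_* arrays
 * above are the statically allocated page tables backing the fixmap
 * region, so it can be mapped before any memory allocator is up.
 * bm_pmd and bm_pud are only wired in when the corresponding page table
 * level exists for the configured VA size, hence __maybe_unused.
 */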
static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	if (pmd_none(pmd)) {
		ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
		__pmd_populate(pmdp, __pa_symbol(ptep),
			       PMD_TYPE_TABLE | PMD_TABLE_AF);
	}
}
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd),
			       PUD_TYPE_TABLE | PUD_TABLE_AF);

	pmdp = pmd_offset_kimg(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud),
			       P4D_TYPE_TABLE | P4D_TABLE_AF);

	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}
/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	unsigned long addr = FIXADDR_TOT_START;
	unsigned long end = FIXADDR_TOP;

	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset_kimg(pgdp, addr);

	early_fixmap_init_pud(p4dp, addr, end);
}
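
/*
 * Illustrative summary (added note, not from the original source): after
 * early_fixmap_init() the fixmap VA range is reachable through the static
 * tables above, roughly
 *
 *	init_mm pgd/p4d entry -> bm_pud -> bm_pmd -> bm_pte[][]
 *
 * (levels that are folded for the configured VA bits simply drop out), so
 * __set_fixmap() below can install or clear individual PTEs without ever
 * allocating page table memory.
 */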
/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		__set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		__pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
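
/*
 * Usage sketch (added for illustration, not part of the original file):
 * callers normally go through the generic wrappers from
 * <asm-generic/fixmap.h> rather than calling __set_fixmap() directly, e.g.
 *
 *	set_fixmap(idx, paddr);		map slot 'idx' with default attributes
 *	...use (void *)fix_to_virt(idx)...
 *	clear_fixmap(idx);		unmap the slot again
 *
 * where 'idx' is one of the FIX_* slots from enum fixed_addresses.
 */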
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);