// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * HIGHMEM API:
 *
 * The kmap() API provides sleep semantics, hence such mappings are referred
 * to as "permanent maps". It allows mapping up to LAST_PKMAP pages, using
 * @last_pkmap_nr as the cursor for book-keeping.
 *
 * kmap_atomic() can't sleep (it calls pagefault_disable()), thus it provides
 * short-lived aka "temporary mappings", which historically were implemented
 * as fixmaps (compile-time addresses etc). Their book-keeping is done per cpu.
 *
 * Both these facts combined (preemption disabled and per-cpu allocation) mean
 * the total number of concurrent fixmaps is limited to the max number of such
 * allocations in a single control path. Thus KM_TYPE_NR (another historic
 * relic) is a smallish number which caps the max per-cpu fixmaps.
 * (An illustrative usage sketch follows this comment block.)
 *
 * ARC HIGHMEM Details
 *
 * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
 *   is now shared between vmalloc and kmap (non-overlapping though)
 *
 * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD.
 *   This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
 *   2M of kvaddr space for a typical config (8K page and 11:8:13 traversal
 *   split); the arithmetic is sketched after kmap_init() below.
 *
 * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
 *   CPU. So the number of CPUs sharing a single PTE page is limited.
 *
 * - pkmap, being preemptible, could in theory do with more than 256 concurrent
 *   mappings. However, the generic pkmap code, map_new_virtual(), doesn't
 *   traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence the limitation.
 */
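/*
 * Illustrative usage sketch, kept out of the build on purpose: the helper
 * below is hypothetical and only demonstrates the calling conventions the
 * comment above describes. kmap() may sleep, so the mapping can outlive a
 * schedule(); kmap_atomic() disables pagefaults (and preemption), so the
 * window between map and unmap must stay short and must not sleep.
 */
#if 0	/* example only, never compiled */
static void highmem_usage_sketch(struct page *pg)
{
	void *vaddr;

	/* "permanent" mapping: may sleep, consumes one of the LAST_PKMAP slots */
	vaddr = kmap(pg);
	memset(vaddr, 0, PAGE_SIZE);	/* could even schedule() in between */
	kunmap(pg);

	/* "temporary" mapping: per-cpu fixmap slot, must not sleep */
	vaddr = kmap_atomic(pg);
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}
#endif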
extern pte_t * pkmap_page_table;
/*
 * Allocate one PTE page for the region starting at @kvaddr and hook it
 * into the swapper (init_mm) page directory.
 */
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pmd_t *pmd_k = pmd_off_k(kvaddr);
	pte_t *pte_k;

	/* memblock_alloc_low() returns zeroed memory, so all PTEs start empty */
	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pmd_populate_kernel(&init_mm, pmd_k, pte_k);

	return pte_k;
}
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);

	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
	alloc_kmap_pgtable(FIXMAP_BASE);
}