#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>
#include <linux/execmem.h>

#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot-time
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 *   Index into __cachemode2pte_tbl[] is the cachemode.
 *
 *   Index into __pte2cachemode_tbl[] are the caching attribute bits of the
 *   pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};
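
/*
 * For example, in this boot-minimal setup _PAGE_CACHE_MODE_UC translates
 * to _PAGE_PWT | _PAGE_PCD, while WC and WT both degrade to the UC-
 * encoding (_PAGE_PCD alone) until pat_init() installs the richer
 * PAT-based encodings.
 */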

unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
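
/*
 * Worked example: a PTE carrying _PAGE_PWT | _PAGE_PCD compresses via
 * __pte2cm_idx() to index 0b011 = 3 (PWT -> bit 0, PCD -> bit 1,
 * PAT -> bit 2, per the table comment above), and entry 3 here yields
 * _PAGE_CACHE_MODE_UC.
 */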

/*
 * Check that the write-protect PAT entry is set for write-protect.
 * To do this without making assumptions how PAT has been set up (Xen has
 * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache
 * mode via the __cachemode2pte_tbl[] into protection bits (those protection
 * bits will select a cache mode of WP or better), and then translate the
 * protection bits back into the cache mode using __pte2cm_idx() and the
 * __pte2cachemode_tbl[] array. This will return the really used cache mode.
 */
bool x86_has_pat_wp(void)
{
	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}

enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_phys_alloc_range(
					PAGE_SIZE * num, PAGE_SIZE,
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT);
		}
		if (!ret && can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
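
/*
 * Typical use: alloc_low_pages(1) hands back one zeroed page that is
 * already present in the direct map, so early page-table construction can
 * write to it immediately. Before the page allocator is up the page comes
 * from the brk area or memblock; afterwards from __get_free_pages().
 */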

/*
 * By default we need to be able to allocate page tables below PGD, first for
 * the 0-ISA_END_ADDRESS range and then for the initial PMD_SIZE mapping.
 * With KASLR memory randomization, depending on the machine's e820 memory
 * map and the PUD alignment, twice that many pages may be needed.
 */

#ifndef CONFIG_X86_5LEVEL
#define INIT_PGD_PAGE_TABLES    3
#else
#define INIT_PGD_PAGE_TABLES    4
#endif

#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT     (2 * INIT_PGD_PAGE_TABLES)
#else
#define INIT_PGD_PAGE_COUNT     (4 * INIT_PGD_PAGE_TABLES)
#endif

#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
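
/*
 * E.g. with 4-level paging and no memory randomization this evaluates to
 * 2 * 3 = 6 pages (24 KiB) of brk space; 5-level paging plus
 * CONFIG_RANDOMIZE_MEMORY grows it to 4 * 4 = 16 pages.
 */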

RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);

void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
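
/*
 * I.e. booting with "nogbpages" forces direct_gbpages off, "gbpages"
 * requests 1G direct mappings (still subject to the X86_FEATURE_GBPAGES
 * check in probe_page_size_mask()), and without either parameter the
 * CONFIG_X86_DIRECT_GBPAGES default applies.
 */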

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

/*
 * INVLPG may not properly flush Global entries on
 * these CPUs.  New microcode fixes the issue.
 */
static const struct x86_cpu_id invlpg_miss_ids[] = {
	X86_MATCH_VFM(INTEL_ALDERLAKE,	    0x2e),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,    0x42c),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0x11),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,	    0x118),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,   0x4117),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,   0x2e),
	{}
};

static void setup_pcid(void)
{
	const struct x86_cpu_id *invlpg_miss_match;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	invlpg_miss_match = x86_match_cpu(invlpg_miss_ids);
	if (invlpg_miss_match &&
	    boot_cpu_data.microcode < invlpg_miss_match->driver_data) {
		pr_info("Incomplete global flushes, disabling PCID");
		setup_clear_cpu_cap(X86_FEATURE_PCID);
		return;
	}

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of small ranges to go with the big page
 * size instead of the small one if the nearby memory is RAM too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge ranges that are contiguous and of the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;

		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
				page_size_string(&mr[i]));

	return nr_range;
}
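
/*
 * Example split on x86_64 with 2M and 1G pages enabled: for [1M, 3G) the
 * ranges come out as [1M, 2M) with 4k pages, [2M, 1G) with 2M pages and
 * [1G, 3G) with 1G pages; unaligned tails are likewise stepped back down
 * through 2M to 4k pages.
 */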

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);

		if (start >= end)
			continue;

		/*
		 * If the range overlaps the brk page-table area, the
		 * pgt buf needs to be allocated from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8K)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
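
/*
 * Concretely, on x86_64 PMD_SHIFT - PAGE_SHIFT - 1 = 21 - 12 - 1 = 8, so
 * each call multiplies step_size by 256: 2M -> 512M -> 128G -> ...,
 * i.e. the mapped memory quickly grows large enough to feed the
 * page-table allocations of the next, much larger step.
 */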

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/*
	 * Systems that have many reserved areas near top of the memory,
	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
	 * require lots of 4K mappings which may exhaust pgt_buf.
	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
	 * there is enough mapped memory that can be allocated from
	 * memblock.
	 */
	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
					 map_end);
	memblock_phys_free(addr, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		unsigned long start;

		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	/*
	 * The code below will alias kernel page-tables in the user-range of the
	 * address space, including the Global bit. So global TLB entries will
	 * be created when using the trampoline page-table.
	 */
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = mm_alloc();
	BUG_ON(!poking_mm);

	/* Xen PV guests need the PGD to be pinned. */
	paravirt_enter_mmap(poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;
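
	/*
	 * Example: if the randomized address lands on the last page of a
	 * PMD (offset 0x1ff000 with 2M PMDs), poking_addr + PAGE_SIZE is
	 * PMD aligned, so the check above shifts the pair up one page to
	 * keep both poking pages under a single PMD.
	 */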

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);

	pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_DEVMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace.  For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets.  Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it ourselves:
	 * the decompressor could be confused by an aligned initrd_end.
	 * The trailing partial page was already reserved earlier in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so PAGE_ALIGN() is safe here and gets the partial page freed.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

#ifdef CONFIG_ADDRESS_MASKING
DEFINE_PER_CPU(u64, tlbstate_untag_mask);
EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
#endif

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long arch_max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable)
{
	/* fill memory with INT3 instructions */
	if (writeable)
		memset(ptr, INT3_INSN_OPCODE, size);
	else
		text_poke_set(ptr, INT3_INSN_OPCODE, size);
}
#endif

struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long start, offset = 0;
	enum execmem_range_flags flags;
	pgprot_t pgprot;

	if (kaslr_enabled())
		offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + offset;

	if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
		pgprot = PAGE_KERNEL_ROX;
		flags = EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE;
	} else {
		pgprot = PAGE_KERNEL;
		flags = EXECMEM_KASAN_SHADOW;
	}

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_MODULE_TEXT] = {
				.flags	= flags,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= pgprot,
				.alignment = MODULE_ALIGN,
			},
			[EXECMEM_KPROBES ... EXECMEM_BPF] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
			[EXECMEM_MODULE_DATA] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */