// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/stackdepot.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include <linux/execmem.h>
#include <linux/vmstat.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = BITS_PER_LONG;
	width = shift - NR_NON_PAGEFLAG_BITS;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
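	/*
	 * If the section, node and zone bitfields are disjoint, OR-ing and
	 * adding the shifted masks produce the same value; any overlap makes
	 * the addition carry, so the two results differ.
	 */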
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
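
/*
 * "mminit_loglevel=" is a kernel command-line parameter; e.g. booting with
 * mminit_loglevel=2 selects MMINIT_TRACE (the levels are defined in
 * internal.h) and enables all of the debug output above.
 */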
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;
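
/*
 * Worked example for mm_compute_batch() below: with 16 CPUs and 64GiB of
 * RAM (16M 4KiB pages), OVERCOMMIT_NEVER yields min(16M/16/256, INT_MAX) =
 * 4096 pages, other policies 256Ki pages; both exceed the CPU-based floor
 * of max_t(s32, 16 * 2, 32) = 32.
 */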
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}

bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);
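
/*
 * Example command lines: "kernelcore=512M", "kernelcore=25%", or
 * "kernelcore=mirror" to keep unmovable kernel allocations in mirrored
 * memory only.
 */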

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		if (!memblock_has_mirror()) {
			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
			goto out;
		}

		if (is_kdump_kernel()) {
			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
			goto out;
		}

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;
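
	/*
	 * The scaling above is (totalpages * 100 * percent) / 10000, which
	 * reduces to totalpages * percent / 100, i.e. percent% of all pages.
	 */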

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	atomic_set(&page->_mapcount, -1);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_NUMA
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}

int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{
	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;

	/*
	 * The static prev_end_pfn holds the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}

static void __meminit init_reserved_page(unsigned long pfn, int nid)
{
	pg_data_t *pgdat;
	int zid;

	if (early_page_initialised(pfn, nid))
		return;

	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);

	if (pageblock_aligned(pfn))
		set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn, int nid)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void init_reserved_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start,
				      phys_addr_t end, int nid)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn, nid);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}

/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - non-memory regions covered by the contiguous flatmem mapping
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
			node, zone_names[zone], pgcnt);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG) {
#ifdef CONFIG_ZONE_DEVICE
			if (zone == ZONE_DEVICE)
				__SetPageReserved(page);
			else
#endif
				__SetPageOffline(page);
		}

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}
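
/*
 * Initialise the struct pages of the part of a memblock range that falls
 * within one zone, and fill any preceding hole tracked via *hole_pfn.
 */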
static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}
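
/*
 * Initialise the memory map for every populated zone intersecting each
 * memblock.memory range, then cover the trailing hole up to the end of the
 * last section.
 */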
static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}

#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{

	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (pageblock_aligned(pfn)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		cond_resched();
	}

	/*
	 * ZONE_DEVICE pages are released directly to the driver page allocator
	 * which will set the page count to 1 when allocating the page.
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_COHERENT)
		set_page_count(page, 0);
}

/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the absence
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
{
	if (!vmemmap_can_optimize(altmap, pgmap))
		return pgmap_vmemmap_nr(pgmap);

	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
}

static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores important compound page info.
		 * Call prep_compound_head() after the first tail page has
		 * been initialized, to not have the data overwritten.
		 */
		if (pfn == head_pfn + 1)
			prep_compound_head(head, order);
	}
}

void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pgmap));
	}

	pr_debug("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}
#endif

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
static unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long zone_start_pfn,
					unsigned long zone_end_pfn)
{
	unsigned long nr_absent;

	/* zone is empty, we don't have any absent pages */
	if (zone_start_pfn == zone_end_pfn)
		return 0;

	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
					   zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}

static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
		z->present_pages = 0;
#if defined(CONFIG_MEMORY_HOTPLUG)
		z->present_early_pages = 0;
#endif
	}

	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;
	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
}
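
/*
 * Count pages in memblock's free ranges: nr_all_pages accumulates every
 * free range, while nr_kernel_pages excludes everything above the lowest
 * possible ZONE_HIGHMEM pfn on HIGHMEM configurations.
 */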
static void __init calc_nr_kernel_pages(void)
{
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	u64 u;
#ifdef CONFIG_HIGHMEM
	unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
#endif

	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = PFN_UP(start_addr);
		end_pfn = PFN_DOWN(end_addr);

		if (start_pfn < end_pfn) {
			nr_all_pages += end_pfn - start_pfn;
#ifdef CONFIG_HIGHMEM
			start_pfn = clamp(start_pfn, 0, high_zone_low);
			end_pfn = clamp(end_pfn, 0, high_zone_low);
#endif
			nr_kernel_pages += end_pfn - start_pfn;
		}
	}
}
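
/*
 * Compute spanned and present pages for every zone on this node, and
 * from them the node-wide totals.
 */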
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
					     unsigned long node_start_pfn,
					     unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   zone_start_pfn,
						   zone_end_pfn);

		real_size = spanned - absent;

		if (spanned)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = spanned;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += spanned;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif

static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);
	pgdat_kswapd_lock_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
		init_waitqueue_head(&pgdat->reclaim_wait[i]);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}

#ifdef CONFIG_UNACCEPTED_MEMORY
	INIT_LIST_HEAD(&zone->unaccepted_pages);
#endif
}

void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, BITS_PER_LONG);

	return usemapsize / BITS_PER_BYTE;
}

static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order = MAX_PAGE_ORDER;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/* Don't let pageblocks exceed the maximum allocation granularity. */
	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
		order = HUGETLB_PAGE_ORDER;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
{
	int nid = pgdat->node_id;
	enum zone_type z;
	int cpu;

	pgdat_init_internals(pgdat);

	if (pgdat->per_cpu_nodestats == &boot_nodestats)
		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * Reset the nr_zones, order and highest_zoneidx before reuse.
	 * Note that kswapd will init kswapd_highest_zoneidx properly
	 * when it starts in the near future.
	 */
	pgdat->nr_zones = 0;
	pgdat->kswapd_order = 0;
	pgdat->kswapd_highest_zoneidx = 0;
	pgdat->node_start_pfn = 0;
	pgdat->node_present_pages = 0;

	for_each_online_cpu(cpu) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
		memset(p, 0, sizeof(*p));
	}

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages and managed_pages because they will
	 * be updated in online_pages() and offline_pages().
	 */
	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		zone->present_pages = 0;
		zone_init_internals(zone, z, nid, 0);
	}
}
#endif
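
/*
 * Boot-time counterpart of free_area_init_core_hotplug() above: set up the
 * pgdat internals and every zone of the node during early init.
 */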
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size = zone->spanned_pages;

		/*
		 * Initialize zone->managed_pages; it will be reset
		 * when the memblock allocator frees pages into the buddy system.
		 */
		zone_init_internals(zone, j, nid, zone->present_pages);

		if (!size)
			continue;

		setup_usemap(zone);
		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
	}
}
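
/*
 * Allocate memory for the memory map. The raw memblock variants return
 * memory without zeroing it; page_init_poison() then poisons the fresh
 * map (a no-op unless the relevant debugging options are enabled) so use
 * of uninitialised struct pages can be caught.
 */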
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
{
	void *ptr;

	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long start, offset, size, end;
	struct page *map;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/*
	 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
	 * aligned but the node_mem_map endpoints must be in order
	 * for the buddy allocator to function correctly.
	 */
	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
	size = (end - start) * sizeof(struct page);
	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
			   pgdat->node_id, false);
	if (!map)
		panic("Failed to allocate %ld bytes for node %d memory map\n",
		      size, pgdat->node_id);
	pgdat->node_mem_map = map + offset;
	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
		 __func__, pgdat->node_id, (unsigned long)pgdat,
		 (unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NUMA
	/* the global mem_map is just set as node 0's */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLATMEM */

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, the start and end PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}

static void __init free_area_init_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;
	pgdat->per_cpu_nodestats = NULL;

	if (start_pfn != end_pfn) {
		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);

		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
	} else {
		pr_info("Initmem setup node %d as memoryless\n", nid);

		reset_memoryless_node_totalpages(pgdat);
	}

	alloc_node_mem_map(pgdat);
	pgdat_set_deferred_range(pgdat);

	free_area_init_core(pgdat);
	lru_gen_init_pgdat(pgdat);
}

/* Any regular or high memory on that node ? */
static void __init check_for_memory(pg_data_t *pgdat)
{
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (populated_zone(zone)) {
			if (IS_ENABLED(CONFIG_HIGHMEM))
				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
			if (zone_type <= ZONE_NORMAL)
				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
			break;
		}
	}
}

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif

/*
 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn sorted in the descending order
 */
static bool arch_has_descending_max_zone_pfns(void)
{
	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}

/**
 * free_area_init - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid, zone;
	bool descending;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
	descending = arch_has_descending_max_zone_pfns();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (descending)
			zone = MAX_NR_ZONES - i - 1;
		else
			zone = i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[zone], start_pfn);
		arch_zone_lowest_possible_pfn[zone] = start_pfn;
		arch_zone_highest_possible_pfn[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
				(u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/*
	 * Print out the early node map, and initialize the
	 * subsection-map relative to active online memory ranges to
	 * enable future "sub-section" extensions of the memory map.
	 */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);
		subsection_map_init(start_pfn, end_pfn - start_pfn);
	}

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	set_pageblock_order();

	for_each_node(nid) {
		pg_data_t *pgdat;

		if (!node_online(nid))
			alloc_offline_node_data(nid);

		pgdat = NODE_DATA(nid);
		free_area_init_node(nid);

		/*
		 * No sysfs hierarchy will be created via register_one_node()
		 * for a memory-less node because here it's not marked as
		 * N_MEMORY and won't be set online later. The benefit is that
		 * userspace won't be confused by sysfs files/directories of a
		 * memory-less node. The pgdat will get fully initialized by
		 * hotadd_init_pgdat() when memory is hotplugged into this node.
		 */
		if (pgdat->node_present_pages) {
			node_set_state(nid, N_MEMORY);
			check_for_memory(pgdat);
		}
	}

	calc_nr_kernel_pages();
	memmap_init();

	/* disable hash distribution for systems with a single node */
	fixup_hashdist();
}

/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, the return value would indicate 256MiB.
 * Note that if only the last node is shifted, 1GiB is enough and this
 * function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
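/*
 * Free an already-initialised PFN range to the buddy allocator: as a
 * single MAX_PAGE_ORDER block when the range is one naturally aligned
 * max-order chunk, otherwise page by page.
 */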
1918 static void __init deferred_free_pages(unsigned long pfn,
1919 unsigned long nr_pages)
1921 struct page *page;
1922 unsigned long i;
1924 if (!nr_pages)
1925 return;
1927 page = pfn_to_page(pfn);
1929 /* Free a large naturally-aligned chunk if possible */
1930 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1931 for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1932 set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1933 __free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY);
1934 return;
1937 /* Accept chunks smaller than MAX_PAGE_ORDER upfront */
1938 accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
1940 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1941 if (pageblock_aligned(pfn))
1942 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1943 __free_pages_core(page, 0, MEMINIT_EARLY);
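/*
 * [Editor's worked example, an assumption about a typical x86-64 config]
 * With 4KiB pages and MAX_PAGE_ORDER == 10, MAX_ORDER_NR_PAGES is 1024,
 * so the fast path above hands a naturally aligned 4MiB chunk to the
 * buddy allocator as a single order-10 free instead of 1024 separate
 * order-0 frees.
 */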
1947 /* Completion tracking for deferred_init_memmap() threads */
1948 static atomic_t pgdat_init_n_undone __initdata;
1949 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1951 static inline void __init pgdat_init_report_one_done(void)
1953 if (atomic_dec_and_test(&pgdat_init_n_undone))
1954 complete(&pgdat_init_all_done_comp);
1958 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1959 * by performing them only once every MAX_ORDER_NR_PAGES.
1960 * Return number of pages initialized.
1962 static unsigned long __init deferred_init_pages(struct zone *zone,
1963 unsigned long pfn, unsigned long end_pfn)
1965 int nid = zone_to_nid(zone);
1966 unsigned long nr_pages = end_pfn - pfn;
1967 int zid = zone_idx(zone);
1968 struct page *page = pfn_to_page(pfn);
1970 for (; pfn < end_pfn; pfn++, page++)
1971 __init_single_page(page, pfn, zid, nid);
1972 return nr_pages;
1976 * This function is meant to pre-load the iterator for the zone init from
1977 * a given point.
1978 * Specifically, it walks through the ranges, starting with the initial
1979 * index passed to it, until it has caught up to the first_init_pfn value,
1980 * and exits there. If we never encounter the value, we return false,
1981 * indicating there are no valid ranges left.
1983 static bool __init
1984 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1985 unsigned long *spfn, unsigned long *epfn,
1986 unsigned long first_init_pfn)
1988 u64 j = *i;
1990 if (j == 0)
1991 __next_mem_pfn_range_in_zone(&j, zone, spfn, epfn);
1994 * Start out by walking through the ranges in this zone that have
1995 * already been initialized. We don't need to do anything with them
1996 * so we just need to flush them out of the system.
1998 for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) {
1999 if (*epfn <= first_init_pfn)
2000 continue;
2001 if (*spfn < first_init_pfn)
2002 *spfn = first_init_pfn;
2003 *i = j;
2004 return true;
2007 return false;
2011 * Initialize and free pages. We do it in two loops: first we initialize
2012 * struct page, then free to buddy allocator, because while we are
2013 * freeing pages we can access pages that are ahead (computing buddy
2014 * page in __free_one_page()).
2016 * To try to keep some memory in the cache, we have the loop broken
2017 * along max page order boundaries. This way we will not cause any
2018 * issues with the buddy page computation.
2020 static unsigned long __init
2021 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2022 unsigned long *end_pfn)
2024 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2025 unsigned long spfn = *start_pfn, epfn = *end_pfn;
2026 unsigned long nr_pages = 0;
2027 u64 j = *i;
2029 /* First we loop through and initialize the page values */
2030 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2031 unsigned long t;
2033 if (mo_pfn <= *start_pfn)
2034 break;
2036 t = min(mo_pfn, *end_pfn);
2037 nr_pages += deferred_init_pages(zone, *start_pfn, t);
2039 if (mo_pfn < *end_pfn) {
2040 *start_pfn = mo_pfn;
2041 break;
2045 /* Reset values and now loop through freeing pages as needed */
2046 swap(j, *i);
2048 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2049 unsigned long t;
2051 if (mo_pfn <= spfn)
2052 break;
2054 t = min(mo_pfn, epfn);
2055 deferred_free_pages(spfn, t - spfn);
2057 if (mo_pfn <= epfn)
2058 break;
2061 return nr_pages;
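/*
 * [Editor's sketch, a userspace model; all names below are illustrative]
 * The chunking pattern used by deferred_init_maxorder(): walk a pfn range
 * one max-order block at a time, fully initializing each block before
 * freeing it, so the buddy computation never touches uninitialized
 * struct pages.
 */
#include <stdio.h>

#define SKETCH_MO_NR_PAGES 1024UL	/* stand-in for MAX_ORDER_NR_PAGES */

static void sketch_pass(const char *what, unsigned long spfn, unsigned long epfn)
{
	printf("%s [%#lx, %#lx)\n", what, spfn, epfn);
}

static void sketch_range(unsigned long spfn, unsigned long epfn)
{
	while (spfn < epfn) {
		/* Clamp each chunk to the next max-order boundary */
		unsigned long end = (spfn + SKETCH_MO_NR_PAGES) &
				    ~(SKETCH_MO_NR_PAGES - 1);

		if (end > epfn)
			end = epfn;
		sketch_pass("init", spfn, end);	/* first loop above */
		sketch_pass("free", spfn, end);	/* second loop above */
		spfn = end;
	}
}

int main(void)
{
	sketch_range(0x64, 0xbb8);	/* pfns 100..3000 */
	return 0;
}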
2064 static void __init
2065 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2066 void *arg)
2068 unsigned long spfn, epfn;
2069 struct zone *zone = arg;
2070 u64 i = 0;
2072 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2075 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
2076 * we can avoid introducing any issues with the buddy allocator.
2078 while (spfn < end_pfn) {
2079 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2080 cond_resched();
2084 static unsigned int __init
2085 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2087 return max(cpumask_weight(node_cpumask), 1U);
2090 /* Initialise remaining memory on a node */
2091 static int __init deferred_init_memmap(void *data)
2093 pg_data_t *pgdat = data;
2094 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2095 unsigned long spfn = 0, epfn = 0;
2096 unsigned long first_init_pfn, flags;
2097 unsigned long start = jiffies;
2098 struct zone *zone;
2099 int max_threads;
2100 u64 i = 0;
2102 /* Bind memory initialisation thread to a local node if possible */
2103 if (!cpumask_empty(cpumask))
2104 set_cpus_allowed_ptr(current, cpumask);
2106 pgdat_resize_lock(pgdat, &flags);
2107 first_init_pfn = pgdat->first_deferred_pfn;
2108 if (first_init_pfn == ULONG_MAX) {
2109 pgdat_resize_unlock(pgdat, &flags);
2110 pgdat_init_report_one_done();
2111 return 0;
2114 /* Sanity check boundaries */
2115 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2116 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2117 pgdat->first_deferred_pfn = ULONG_MAX;
2120 * Once we unlock here, the zone cannot be grown anymore, thus if an
2121 * interrupt thread must allocate this early in boot, the zone must be
2122 * pre-grown prior to the start of deferred page initialization.
2124 pgdat_resize_unlock(pgdat, &flags);
2126 /* Only the highest zone is deferred */
2127 zone = pgdat->node_zones + pgdat->nr_zones - 1;
2129 max_threads = deferred_page_init_max_threads(cpumask);
2131 while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) {
2132 first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION);
2133 struct padata_mt_job job = {
2134 .thread_fn = deferred_init_memmap_chunk,
2135 .fn_arg = zone,
2136 .start = spfn,
2137 .size = first_init_pfn - spfn,
2138 .align = PAGES_PER_SECTION,
2139 .min_chunk = PAGES_PER_SECTION,
2140 .max_threads = max_threads,
2141 .numa_aware = false,
2144 padata_do_multithreaded(&job);
2147 /* Sanity check that the next zone really is unpopulated */
2148 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
2150 pr_info("node %d deferred pages initialised in %ums\n",
2151 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2153 pgdat_init_report_one_done();
2154 return 0;
2158 * If this zone has deferred pages, try to grow it by initializing enough
2159 * deferred pages to satisfy the allocation specified by order, rounded up to
2160 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
2161 * of SECTION_SIZE bytes by initializing struct pages in increments of
2162 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2164 * Return true when zone was grown, otherwise return false. We return true even
2165 * when we grow less than requested, to let the caller decide if there are
2166 * enough pages to satisfy the allocation.
2168 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2170 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2171 pg_data_t *pgdat = zone->zone_pgdat;
2172 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2173 unsigned long spfn, epfn, flags;
2174 unsigned long nr_pages = 0;
2175 u64 i = 0;
2177 /* Only the last zone may have deferred pages */
2178 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2179 return false;
2181 pgdat_resize_lock(pgdat, &flags);
2184 * If someone grew this zone while we were waiting for the spinlock,
2185 * return true, as there might be enough pages already.
2187 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2188 pgdat_resize_unlock(pgdat, &flags);
2189 return true;
2192 /* If the zone is empty, somebody else may have already cleared it out */
2193 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2194 first_deferred_pfn)) {
2195 pgdat->first_deferred_pfn = ULONG_MAX;
2196 pgdat_resize_unlock(pgdat, &flags);
2197 /* Retry only once. */
2198 return first_deferred_pfn != ULONG_MAX;
2202 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
2203 * that we can avoid introducing any issues with the buddy
2204 * allocator.
2206 while (spfn < epfn) {
2207 /* update our first deferred PFN for this section */
2208 first_deferred_pfn = spfn;
2210 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2211 touch_nmi_watchdog();
2213 /* We should only stop along section boundaries */
2214 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2215 continue;
2217 /* If our quota has been met we can stop here */
2218 if (nr_pages >= nr_pages_needed)
2219 break;
2222 pgdat->first_deferred_pfn = spfn;
2223 pgdat_resize_unlock(pgdat, &flags);
2225 return nr_pages > 0;
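/*
 * [Editor's worked example, assuming x86-64's 128MiB sections, i.e.
 * PAGES_PER_SECTION == 32768 with 4KiB pages] An order-9 (2MiB)
 * allocation needs 512 pages; ALIGN(512, 32768) rounds that up to
 * 32768, so deferred_grow_zone() initializes at least one full
 * section (128MiB) per call.
 */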
2228 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2230 #ifdef CONFIG_CMA
2231 void __init init_cma_reserved_pageblock(struct page *page)
2233 unsigned i = pageblock_nr_pages;
2234 struct page *p = page;
2236 do {
2237 __ClearPageReserved(p);
2238 set_page_count(p, 0);
2239 } while (++p, --i);
2241 set_pageblock_migratetype(page, MIGRATE_CMA);
2242 set_page_refcounted(page);
2243 /* pages were reserved and not allocated */
2244 clear_page_tag_ref(page);
2245 __free_pages(page, pageblock_order);
2247 adjust_managed_page_count(page, pageblock_nr_pages);
2248 page_zone(page)->cma_pages += pageblock_nr_pages;
2250 #endif
2252 void set_zone_contiguous(struct zone *zone)
2254 unsigned long block_start_pfn = zone->zone_start_pfn;
2255 unsigned long block_end_pfn;
2257 block_end_pfn = pageblock_end_pfn(block_start_pfn);
2258 for (; block_start_pfn < zone_end_pfn(zone);
2259 block_start_pfn = block_end_pfn,
2260 block_end_pfn += pageblock_nr_pages) {
2262 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2264 if (!__pageblock_pfn_to_page(block_start_pfn,
2265 block_end_pfn, zone))
2266 return;
2267 cond_resched();
2270 /* We confirm that there are no holes */
2271 zone->contiguous = true;
2274 static void __init mem_init_print_info(void);
2275 void __init page_alloc_init_late(void)
2277 struct zone *zone;
2278 int nid;
2280 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2282 /* There will be num_node_state(N_MEMORY) threads */
2283 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2284 for_each_node_state(nid, N_MEMORY) {
2285 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2288 /* Block until all are initialised */
2289 wait_for_completion(&pgdat_init_all_done_comp);
2292 * We initialized the rest of the deferred pages. Permanently disable
2293 * on-demand struct page initialization.
2295 static_branch_disable(&deferred_pages);
2297 /* Reinit limits that are based on free pages after the kernel is up */
2298 files_maxfiles_init();
2299 #endif
2301 /* Accounting of total+free memory is stable at this point. */
2302 mem_init_print_info();
2303 buffer_init();
2305 /* Discard memblock private memory */
2306 memblock_discard();
2308 for_each_node_state(nid, N_MEMORY)
2309 shuffle_free_memory(NODE_DATA(nid));
2311 for_each_populated_zone(zone)
2312 set_zone_contiguous(zone);
2314 /* Initialize page ext after all struct pages are initialized. */
2315 if (deferred_struct_pages)
2316 page_ext_init();
2318 page_alloc_sysctl_init();
2322 * Adaptive scale is meant to reduce sizes of hash tables on large memory
2323 * machines. As memory size is increased the scale is also increased but at
2324 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
2325 * quadruples the scale is increased by one, which means the size of hash table
2326 * only doubles, instead of quadrupling as well.
2327 * Because 32-bit systems cannot have the large physical memory where this
2328 * scaling makes sense, it is disabled on such platforms.
2330 #if __BITS_PER_LONG > 32
2331 #define ADAPT_SCALE_BASE (64ul << 30)
2332 #define ADAPT_SCALE_SHIFT 2
2333 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
2334 #endif
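/*
 * [Editor's sketch, a userspace model assuming 4KiB pages and a caller
 * scale of 17; sketch_entries() is hypothetical] How the adaptive scale
 * keeps hash tables sub-linear: every quadrupling of memory past 64GiB
 * bumps the scale by one, so the entry count only doubles.
 */
#include <stdio.h>

#define SK_PAGE_SHIFT	12
#define SK_ADAPT_NPAGES	((64UL << 30) >> SK_PAGE_SHIFT)	/* 64GiB in pages */

static unsigned long sketch_entries(unsigned long npages, int scale)
{
	unsigned long adapt;

	for (adapt = SK_ADAPT_NPAGES; adapt < npages; adapt <<= 2)
		scale++;
	return scale > SK_PAGE_SHIFT ? npages >> (scale - SK_PAGE_SHIFT)
				     : npages << (SK_PAGE_SHIFT - scale);
}

int main(void)
{
	unsigned long gib;

	for (gib = 64; gib <= 1024; gib *= 4)	/* 64GiB, 256GiB, 1TiB */
		printf("%5luGiB -> %lu entries\n", gib,
		       sketch_entries((gib << 30) >> SK_PAGE_SHIFT, 17));
	return 0;
}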
2337 * allocate a large system hash table from bootmem
2338 * - it is assumed that the hash table must contain an exact power-of-2
2339 * quantity of entries
2340 * - limit is the number of hash buckets, not the total allocation size
2342 void *__init alloc_large_system_hash(const char *tablename,
2343 unsigned long bucketsize,
2344 unsigned long numentries,
2345 int scale,
2346 int flags,
2347 unsigned int *_hash_shift,
2348 unsigned int *_hash_mask,
2349 unsigned long low_limit,
2350 unsigned long high_limit)
2352 unsigned long long max = high_limit;
2353 unsigned long log2qty, size;
2354 void *table;
2355 gfp_t gfp_flags;
2356 bool virt;
2357 bool huge;
2359 /* allow the kernel cmdline to have a say */
2360 if (!numentries) {
2361 /* round applicable memory size up to nearest megabyte */
2362 numentries = nr_kernel_pages;
2364 /* It isn't necessary when PAGE_SIZE >= 1MB */
2365 if (PAGE_SIZE < SZ_1M)
2366 numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2368 #if __BITS_PER_LONG > 32
2369 if (!high_limit) {
2370 unsigned long adapt;
2372 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2373 adapt <<= ADAPT_SCALE_SHIFT)
2374 scale++;
2376 #endif
2378 /* limit to 1 bucket per 2^scale bytes of low memory */
2379 if (scale > PAGE_SHIFT)
2380 numentries >>= (scale - PAGE_SHIFT);
2381 else
2382 numentries <<= (PAGE_SHIFT - scale);
2384 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2385 numentries = PAGE_SIZE / bucketsize;
2387 numentries = roundup_pow_of_two(numentries);
2389 /* limit allocation size to 1/16 total memory by default */
2390 if (max == 0) {
2391 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2392 do_div(max, bucketsize);
2394 max = min(max, 0x80000000ULL);
2396 if (numentries < low_limit)
2397 numentries = low_limit;
2398 if (numentries > max)
2399 numentries = max;
2401 log2qty = ilog2(numentries);
2403 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
2404 do {
2405 virt = false;
2406 size = bucketsize << log2qty;
2407 if (flags & HASH_EARLY) {
2408 if (flags & HASH_ZERO)
2409 table = memblock_alloc(size, SMP_CACHE_BYTES);
2410 else
2411 table = memblock_alloc_raw(size,
2412 SMP_CACHE_BYTES);
2413 } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
2414 table = vmalloc_huge(size, gfp_flags);
2415 virt = true;
2416 if (table)
2417 huge = is_vm_area_hugepages(table);
2418 } else {
2420 * If bucketsize is not a power of two, we may free some
2421 * pages at the end of the hash table; alloc_pages_exact()
2422 * does this automatically.
2424 table = alloc_pages_exact(size, gfp_flags);
2425 kmemleak_alloc(table, size, 1, gfp_flags);
2427 } while (!table && size > PAGE_SIZE && --log2qty);
2429 if (!table)
2430 panic("Failed to allocate %s hash table\n", tablename);
2432 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2433 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2434 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2436 if (_hash_shift)
2437 *_hash_shift = log2qty;
2438 if (_hash_mask)
2439 *_hash_mask = (1 << log2qty) - 1;
2441 return table;
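/*
 * [Editor's usage sketch; "Example cache" and the surrounding names are
 * illustrative, modeled on early-boot callers such as the dentry cache]
 * A typical caller sizes the table from memory (numentries == 0), asks
 * for one bucket per 2^14 bytes of low memory, and lets the helper
 * report the resulting shift:
 */
static struct hlist_head *example_table __initdata;
static unsigned int example_shift __initdata;

static void __init example_hash_init(void)
{
	example_table = alloc_large_system_hash("Example cache",
						sizeof(struct hlist_head),
						0,	/* size from memory */
						14,	/* 1 bucket / 16KiB */
						HASH_EARLY | HASH_ZERO,
						&example_shift,
						NULL,	/* mask not needed */
						256,	/* low_limit */
						0);	/* no high_limit */
}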
2444 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2445 unsigned int order)
2447 if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2448 int nid = early_pfn_to_nid(pfn);
2450 if (!early_page_initialised(pfn, nid))
2451 return;
2454 if (!kmsan_memblock_free_pages(page, order)) {
2455 /* KMSAN will take care of these pages. */
2456 return;
2459 /* pages were reserved and not allocated */
2460 clear_page_tag_ref(page);
2461 __free_pages_core(page, order, MEMINIT_EARLY);
2464 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2465 EXPORT_SYMBOL(init_on_alloc);
2467 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2468 EXPORT_SYMBOL(init_on_free);
2470 static bool _init_on_alloc_enabled_early __read_mostly
2471 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2472 static int __init early_init_on_alloc(char *buf)
2475 return kstrtobool(buf, &_init_on_alloc_enabled_early);
2477 early_param("init_on_alloc", early_init_on_alloc);
2479 static bool _init_on_free_enabled_early __read_mostly
2480 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2481 static int __init early_init_on_free(char *buf)
2483 return kstrtobool(buf, &_init_on_free_enabled_early);
2485 early_param("init_on_free", early_init_on_free);
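/*
 * [Editor's note] These handlers back the documented kernel command-line
 * switches; for example, booting with:
 *
 *	init_on_alloc=1 init_on_free=1
 *
 * requests zero-initialization on both allocation and free, subject to
 * the page-poisoning override applied below.
 */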
2487 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2490 * Enable static keys related to various memory debugging and hardening options.
2491 * Some override others, and depend on early params that are evaluated in the
2492 * order of appearance. So we need to first gather the full picture of what was
2493 * enabled, and then make decisions.
2495 static void __init mem_debugging_and_hardening_init(void)
2497 bool page_poisoning_requested = false;
2498 bool want_check_pages = false;
2500 #ifdef CONFIG_PAGE_POISONING
2502 * Page poisoning serves as debug page alloc for some arches. If
2503 * either of those options is enabled, enable poisoning.
2505 if (page_poisoning_enabled() ||
2506 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2507 debug_pagealloc_enabled())) {
2508 static_branch_enable(&_page_poisoning_enabled);
2509 page_poisoning_requested = true;
2510 want_check_pages = true;
2512 #endif
2514 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2515 page_poisoning_requested) {
2516 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2517 "will take precedence over init_on_alloc and init_on_free\n");
2518 _init_on_alloc_enabled_early = false;
2519 _init_on_free_enabled_early = false;
2522 if (_init_on_alloc_enabled_early) {
2523 want_check_pages = true;
2524 static_branch_enable(&init_on_alloc);
2525 } else {
2526 static_branch_disable(&init_on_alloc);
2529 if (_init_on_free_enabled_early) {
2530 want_check_pages = true;
2531 static_branch_enable(&init_on_free);
2532 } else {
2533 static_branch_disable(&init_on_free);
2536 if (IS_ENABLED(CONFIG_KMSAN) &&
2537 (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2538 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2540 #ifdef CONFIG_DEBUG_PAGEALLOC
2541 if (debug_pagealloc_enabled()) {
2542 want_check_pages = true;
2543 static_branch_enable(&_debug_pagealloc_enabled);
2545 if (debug_guardpage_minorder())
2546 static_branch_enable(&_debug_guardpage_enabled);
2548 #endif
2551 * Any page debugging or hardening option also enables sanity checking
2552 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2553 * enabled already.
2555 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2556 static_branch_enable(&check_pages_enabled);
2559 /* Report memory auto-initialization states for this boot. */
2560 static void __init report_meminit(void)
2562 const char *stack;
2564 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2565 stack = "all(pattern)";
2566 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2567 stack = "all(zero)";
2568 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2569 stack = "byref_all(zero)";
2570 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2571 stack = "byref(zero)";
2572 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2573 stack = "__user(zero)";
2574 else
2575 stack = "off";
2577 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2578 stack, str_on_off(want_init_on_alloc(GFP_KERNEL)),
2579 str_on_off(want_init_on_free()));
2580 if (want_init_on_free())
2581 pr_info("mem auto-init: clearing system memory may take some time...\n");
2584 static void __init mem_init_print_info(void)
2586 unsigned long physpages, codesize, datasize, rosize, bss_size;
2587 unsigned long init_code_size, init_data_size;
2589 physpages = get_num_physpages();
2590 codesize = _etext - _stext;
2591 datasize = _edata - _sdata;
2592 rosize = __end_rodata - __start_rodata;
2593 bss_size = __bss_stop - __bss_start;
2594 init_data_size = __init_end - __init_begin;
2595 init_code_size = _einittext - _sinittext;
2598 * Detect special cases and adjust section sizes accordingly:
2599 * 1) .init.* may be embedded into .data sections
2600 * 2) .init.text.* may be out of [__init_begin, __init_end],
2601 * please refer to arch/tile/kernel/vmlinux.lds.S.
2602 * 3) .rodata.* may be embedded into .text or .data sections.
2604 #define adj_init_size(start, end, size, pos, adj) \
2605 do { \
2606 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2607 size -= adj; \
2608 } while (0)
2610 adj_init_size(__init_begin, __init_end, init_data_size,
2611 _sinittext, init_code_size);
2612 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2613 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2614 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2615 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2617 #undef adj_init_size
2619 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2620 #ifdef CONFIG_HIGHMEM
2621 ", %luK highmem"
2622 #endif
2623 ")\n",
2624 K(nr_free_pages()), K(physpages),
2625 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2626 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2627 K(physpages - totalram_pages() - totalcma_pages),
2628 K(totalcma_pages)
2629 #ifdef CONFIG_HIGHMEM
2630 , K(totalhigh_pages())
2631 #endif
2636 * Set up kernel memory allocators
2638 void __init mm_core_init(void)
2640 /* Initializations relying on SMP setup */
2641 BUILD_BUG_ON(MAX_ZONELISTS > 2);
2642 build_all_zonelists(NULL);
2643 page_alloc_init_cpuhp();
2644 alloc_tag_sec_init();
2646 * page_ext requires contiguous pages
2647 * bigger than MAX_PAGE_ORDER, unless SPARSEMEM is used.
2649 page_ext_init_flatmem();
2650 mem_debugging_and_hardening_init();
2651 kfence_alloc_pool_and_metadata();
2652 report_meminit();
2653 kmsan_init_shadow();
2654 stack_depot_early_init();
2655 mem_init();
2656 kmem_cache_init();
2658 * page_owner must be initialized after the buddy allocator is ready,
2659 * and also after slab is ready, so that stack_depot_init() works properly.
2661 page_ext_init_flatmem_late();
2662 kmemleak_init();
2663 ptlock_cache_init();
2664 pgtable_cache_init();
2665 debug_objects_mem_init();
2666 vmalloc_init();
2667 /* If page_ext init was not deferred, do it now, as vmap is fully initialized */
2668 if (!deferred_struct_pages)
2669 page_ext_init();
2670 /* Should be run before the first non-init thread is created */
2671 init_espfix_bsp();
2672 /* Should be run after espfix64 is set up. */
2673 pti_init();
2674 kmsan_init_runtime();
2675 mm_cache_init();
2676 execmem_init();