// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
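
/*
 * SECTIONS_SHIFT is only defined for SPARSEMEM configurations; fall back
 * to 0 here so the pageflags layout report below compiles everywhere.
 */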
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
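
/*
 * Illustrative output (hypothetical two-node machine booted with
 * mminit_loglevel=4; the zones actually present depend on the config):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 1:Normal
 *   mminit::zonelist thisnode 0:Normal = 0:Normal
 */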

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;
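
	/*
	 * Sketch of the page->flags layout being verified (a field whose
	 * configured width is zero simply vanishes):
	 *
	 *   MSB                                                        LSB
	 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | ... flags |
	 */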
	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
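	/*
	 * If the three fields are disjoint, OR and ADD agree; any overlap
	 * produces a carry in the addition and the two values diverge.
	 */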
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */
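
/* Parent kobject for the /sys/kernel/mm hierarchy, created in mm_sysfs_init(). */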
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX.
	 */
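	/*
	 * Worked example (hypothetical numbers): 16 CPUs and 16GiB of RAM
	 * (4194304 pages of 4KiB) yield 4194304/16/256 = 1024 pages under
	 * OVERCOMMIT_NEVER and 4194304/16/4 = 65536 otherwise; both exceed
	 * the CPU-based floor of max_t(s32, 16*2, 32) = 32.
	 */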
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
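
/*
 * Memory-hotplug callback: the amount of RAM has changed, so the batch
 * derived from it is recomputed.
 */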
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
				unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI,	/* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif /* CONFIG_SMP */
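
/*
 * Create /sys/kernel/mm at postcore time so that later initcalls can
 * register their own entries underneath it.
 */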
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);