// SPDX-License-Identifier: GPL-2.0
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"
20 #define KPMSIZE sizeof(u64)
21 #define KPMMASK (KPMSIZE - 1)
22 #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
24 /* /proc/kpagecount - an array exposing page counts
26 * Each entry is a u64 representing the corresponding
27 * physical page count.
29 static ssize_t
kpagecount_read(struct file
*file
, char __user
*buf
,
30 size_t count
, loff_t
*ppos
)
32 u64 __user
*out
= (u64 __user
*)buf
;
34 unsigned long src
= *ppos
;
40 count
= min_t(size_t, count
, (max_pfn
* KPMSIZE
) - src
);
41 if (src
& KPMMASK
|| count
& KPMMASK
)
46 ppage
= pfn_to_page(pfn
);
49 if (!ppage
|| PageSlab(ppage
))
52 pcount
= page_mapcount(ppage
);
54 if (put_user(pcount
, out
)) {
66 *ppos
+= (char __user
*)out
- buf
;
68 ret
= (char __user
*)out
- buf
;
72 static const struct file_operations proc_kpagecount_operations
= {
74 .read
= kpagecount_read
,
77 /* /proc/kpageflags - an array exposing page flags
79 * Each entry is a u64 representing the corresponding
80 * physical page flags.
83 static inline u64
kpf_copy_bit(u64 kflags
, int ubit
, int kbit
)
85 return ((kflags
>> kbit
) & 1) << ubit
;
88 u64
stable_page_flags(struct page
*page
)
94 * pseudo flag: KPF_NOPAGE
95 * it differentiates a memory hole from a page with no flags
98 return 1 << KPF_NOPAGE
;
104 * pseudo flags for the well known (anonymous) memory mapped pages
106 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
107 * simple test in page_mapped() is not enough.
109 if (!PageSlab(page
) && page_mapped(page
))
117 * compound pages: export both head/tail info
118 * they together define a compound page's start/end pos and order
121 u
|= 1 << KPF_COMPOUND_HEAD
;
123 u
|= 1 << KPF_COMPOUND_TAIL
;
127 * PageTransCompound can be true for non-huge compound pages (slab
128 * pages or pages allocated by drivers with __GFP_COMP) because it
129 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
130 * to make sure a given page is a thp, not a non-huge compound page.
132 else if (PageTransCompound(page
)) {
133 struct page
*head
= compound_head(page
);
135 if (PageLRU(head
) || PageAnon(head
))
137 else if (is_huge_zero_page(head
)) {
138 u
|= 1 << KPF_ZERO_PAGE
;
141 } else if (is_zero_pfn(page_to_pfn(page
)))
142 u
|= 1 << KPF_ZERO_PAGE
;
146 * Caveats on high order pages: page->_refcount will only be set
147 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
148 * SLOB won't set PG_slab at all on compound pages.
152 else if (page_count(page
) == 0 && is_free_buddy_page(page
))
155 if (PageBalloon(page
))
156 u
|= 1 << KPF_BALLOON
;
158 if (page_is_idle(page
))
161 u
|= kpf_copy_bit(k
, KPF_LOCKED
, PG_locked
);
163 u
|= kpf_copy_bit(k
, KPF_SLAB
, PG_slab
);
164 if (PageTail(page
) && PageSlab(compound_head(page
)))
167 u
|= kpf_copy_bit(k
, KPF_ERROR
, PG_error
);
168 u
|= kpf_copy_bit(k
, KPF_DIRTY
, PG_dirty
);
169 u
|= kpf_copy_bit(k
, KPF_UPTODATE
, PG_uptodate
);
170 u
|= kpf_copy_bit(k
, KPF_WRITEBACK
, PG_writeback
);
172 u
|= kpf_copy_bit(k
, KPF_LRU
, PG_lru
);
173 u
|= kpf_copy_bit(k
, KPF_REFERENCED
, PG_referenced
);
174 u
|= kpf_copy_bit(k
, KPF_ACTIVE
, PG_active
);
175 u
|= kpf_copy_bit(k
, KPF_RECLAIM
, PG_reclaim
);
177 if (PageSwapCache(page
))
178 u
|= 1 << KPF_SWAPCACHE
;
179 u
|= kpf_copy_bit(k
, KPF_SWAPBACKED
, PG_swapbacked
);
181 u
|= kpf_copy_bit(k
, KPF_UNEVICTABLE
, PG_unevictable
);
182 u
|= kpf_copy_bit(k
, KPF_MLOCKED
, PG_mlocked
);
184 #ifdef CONFIG_MEMORY_FAILURE
185 u
|= kpf_copy_bit(k
, KPF_HWPOISON
, PG_hwpoison
);
188 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
189 u
|= kpf_copy_bit(k
, KPF_UNCACHED
, PG_uncached
);
192 u
|= kpf_copy_bit(k
, KPF_RESERVED
, PG_reserved
);
193 u
|= kpf_copy_bit(k
, KPF_MAPPEDTODISK
, PG_mappedtodisk
);
194 u
|= kpf_copy_bit(k
, KPF_PRIVATE
, PG_private
);
195 u
|= kpf_copy_bit(k
, KPF_PRIVATE_2
, PG_private_2
);
196 u
|= kpf_copy_bit(k
, KPF_OWNER_PRIVATE
, PG_owner_priv_1
);
197 u
|= kpf_copy_bit(k
, KPF_ARCH
, PG_arch_1
);
202 static ssize_t
kpageflags_read(struct file
*file
, char __user
*buf
,
203 size_t count
, loff_t
*ppos
)
205 u64 __user
*out
= (u64 __user
*)buf
;
207 unsigned long src
= *ppos
;
212 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
213 if (src
& KPMMASK
|| count
& KPMMASK
)
218 ppage
= pfn_to_page(pfn
);
222 if (put_user(stable_page_flags(ppage
), out
)) {
234 *ppos
+= (char __user
*)out
- buf
;
236 ret
= (char __user
*)out
- buf
;
240 static const struct file_operations proc_kpageflags_operations
= {
242 .read
= kpageflags_read
,
246 static ssize_t
kpagecgroup_read(struct file
*file
, char __user
*buf
,
247 size_t count
, loff_t
*ppos
)
249 u64 __user
*out
= (u64 __user
*)buf
;
251 unsigned long src
= *ppos
;
257 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
258 if (src
& KPMMASK
|| count
& KPMMASK
)
263 ppage
= pfn_to_page(pfn
);
268 ino
= page_cgroup_ino(ppage
);
272 if (put_user(ino
, out
)) {
284 *ppos
+= (char __user
*)out
- buf
;
286 ret
= (char __user
*)out
- buf
;
290 static const struct file_operations proc_kpagecgroup_operations
= {
292 .read
= kpagecgroup_read
,
294 #endif /* CONFIG_MEMCG */
296 static int __init
proc_page_init(void)
298 proc_create("kpagecount", S_IRUSR
, NULL
, &proc_kpagecount_operations
);
299 proc_create("kpageflags", S_IRUSR
, NULL
, &proc_kpageflags_operations
);
301 proc_create("kpagecgroup", S_IRUSR
, NULL
, &proc_kpagecgroup_operations
);
305 fs_initcall(proc_page_init
);