#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"
/* Each /proc/kpage* file is a flat array of one u64 record per pfn. */
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
23 /* /proc/kpagecount - an array exposing page counts
25 * Each entry is a u64 representing the corresponding
26 * physical page count.
28 static ssize_t
kpagecount_read(struct file
*file
, char __user
*buf
,
29 size_t count
, loff_t
*ppos
)
31 u64 __user
*out
= (u64 __user
*)buf
;
33 unsigned long src
= *ppos
;
39 count
= min_t(size_t, count
, (max_pfn
* KPMSIZE
) - src
);
40 if (src
& KPMMASK
|| count
& KPMMASK
)
45 ppage
= pfn_to_page(pfn
);
48 if (!ppage
|| PageSlab(ppage
))
51 pcount
= page_mapcount(ppage
);
53 if (put_user(pcount
, out
)) {
65 *ppos
+= (char __user
*)out
- buf
;
67 ret
= (char __user
*)out
- buf
;
71 static const struct file_operations proc_kpagecount_operations
= {
73 .read
= kpagecount_read
,
76 /* /proc/kpageflags - an array exposing page flags
78 * Each entry is a u64 representing the corresponding
79 * physical page flags.
82 static inline u64
kpf_copy_bit(u64 kflags
, int ubit
, int kbit
)
84 return ((kflags
>> kbit
) & 1) << ubit
;
87 u64
stable_page_flags(struct page
*page
)
93 * pseudo flag: KPF_NOPAGE
94 * it differentiates a memory hole from a page with no flags
97 return 1 << KPF_NOPAGE
;
103 * pseudo flags for the well known (anonymous) memory mapped pages
105 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
106 * simple test in page_mapped() is not enough.
108 if (!PageSlab(page
) && page_mapped(page
))
116 * compound pages: export both head/tail info
117 * they together define a compound page's start/end pos and order
120 u
|= 1 << KPF_COMPOUND_HEAD
;
122 u
|= 1 << KPF_COMPOUND_TAIL
;
126 * PageTransCompound can be true for non-huge compound pages (slab
127 * pages or pages allocated by drivers with __GFP_COMP) because it
128 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
129 * to make sure a given page is a thp, not a non-huge compound page.
131 else if (PageTransCompound(page
)) {
132 struct page
*head
= compound_head(page
);
134 if (PageLRU(head
) || PageAnon(head
))
136 else if (is_huge_zero_page(head
)) {
137 u
|= 1 << KPF_ZERO_PAGE
;
140 } else if (is_zero_pfn(page_to_pfn(page
)))
141 u
|= 1 << KPF_ZERO_PAGE
;
145 * Caveats on high order pages: page->_count will only be set
146 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
147 * SLOB won't set PG_slab at all on compound pages.
152 if (PageBalloon(page
))
153 u
|= 1 << KPF_BALLOON
;
155 if (page_is_idle(page
))
158 u
|= kpf_copy_bit(k
, KPF_LOCKED
, PG_locked
);
160 u
|= kpf_copy_bit(k
, KPF_SLAB
, PG_slab
);
162 u
|= kpf_copy_bit(k
, KPF_ERROR
, PG_error
);
163 u
|= kpf_copy_bit(k
, KPF_DIRTY
, PG_dirty
);
164 u
|= kpf_copy_bit(k
, KPF_UPTODATE
, PG_uptodate
);
165 u
|= kpf_copy_bit(k
, KPF_WRITEBACK
, PG_writeback
);
167 u
|= kpf_copy_bit(k
, KPF_LRU
, PG_lru
);
168 u
|= kpf_copy_bit(k
, KPF_REFERENCED
, PG_referenced
);
169 u
|= kpf_copy_bit(k
, KPF_ACTIVE
, PG_active
);
170 u
|= kpf_copy_bit(k
, KPF_RECLAIM
, PG_reclaim
);
172 u
|= kpf_copy_bit(k
, KPF_SWAPCACHE
, PG_swapcache
);
173 u
|= kpf_copy_bit(k
, KPF_SWAPBACKED
, PG_swapbacked
);
175 u
|= kpf_copy_bit(k
, KPF_UNEVICTABLE
, PG_unevictable
);
176 u
|= kpf_copy_bit(k
, KPF_MLOCKED
, PG_mlocked
);
178 #ifdef CONFIG_MEMORY_FAILURE
179 u
|= kpf_copy_bit(k
, KPF_HWPOISON
, PG_hwpoison
);
182 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
183 u
|= kpf_copy_bit(k
, KPF_UNCACHED
, PG_uncached
);
186 u
|= kpf_copy_bit(k
, KPF_RESERVED
, PG_reserved
);
187 u
|= kpf_copy_bit(k
, KPF_MAPPEDTODISK
, PG_mappedtodisk
);
188 u
|= kpf_copy_bit(k
, KPF_PRIVATE
, PG_private
);
189 u
|= kpf_copy_bit(k
, KPF_PRIVATE_2
, PG_private_2
);
190 u
|= kpf_copy_bit(k
, KPF_OWNER_PRIVATE
, PG_owner_priv_1
);
191 u
|= kpf_copy_bit(k
, KPF_ARCH
, PG_arch_1
);
196 static ssize_t
kpageflags_read(struct file
*file
, char __user
*buf
,
197 size_t count
, loff_t
*ppos
)
199 u64 __user
*out
= (u64 __user
*)buf
;
201 unsigned long src
= *ppos
;
206 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
207 if (src
& KPMMASK
|| count
& KPMMASK
)
212 ppage
= pfn_to_page(pfn
);
216 if (put_user(stable_page_flags(ppage
), out
)) {
228 *ppos
+= (char __user
*)out
- buf
;
230 ret
= (char __user
*)out
- buf
;
234 static const struct file_operations proc_kpageflags_operations
= {
236 .read
= kpageflags_read
,
240 static ssize_t
kpagecgroup_read(struct file
*file
, char __user
*buf
,
241 size_t count
, loff_t
*ppos
)
243 u64 __user
*out
= (u64 __user
*)buf
;
245 unsigned long src
= *ppos
;
251 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
252 if (src
& KPMMASK
|| count
& KPMMASK
)
257 ppage
= pfn_to_page(pfn
);
262 ino
= page_cgroup_ino(ppage
);
266 if (put_user(ino
, out
)) {
278 *ppos
+= (char __user
*)out
- buf
;
280 ret
= (char __user
*)out
- buf
;
284 static const struct file_operations proc_kpagecgroup_operations
= {
286 .read
= kpagecgroup_read
,
288 #endif /* CONFIG_MEMCG */
290 static int __init
proc_page_init(void)
292 proc_create("kpagecount", S_IRUSR
, NULL
, &proc_kpagecount_operations
);
293 proc_create("kpageflags", S_IRUSR
, NULL
, &proc_kpageflags_operations
);
295 proc_create("kpagecgroup", S_IRUSR
, NULL
, &proc_kpagecgroup_operations
);
299 fs_initcall(proc_page_init
);