1 #include <linux/bootmem.h>
2 #include <linux/compiler.h>
4 #include <linux/init.h>
7 #include <linux/mmzone.h>
8 #include <linux/huge_mm.h>
9 #include <linux/proc_fs.h>
10 #include <linux/seq_file.h>
11 #include <linux/hugetlb.h>
12 #include <linux/memcontrol.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/page_idle.h>
15 #include <linux/kernel-page-flags.h>
16 #include <asm/uaccess.h>
/* Size of one exported array entry: the /proc/kpage* files are arrays
 * of u64, one entry per physical page frame. */
19 #define KPMSIZE sizeof(u64)
/* Alignment mask: read offset and length must be multiples of KPMSIZE. */
20 #define KPMMASK (KPMSIZE - 1)
/* Width of one entry in bits. */
21 #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
/*
 * kpagecount_read - .read handler for /proc/kpagecount.
 *
 * Exposes a pfn-indexed array of u64 entries: *ppos is a byte offset
 * into that array, and each entry holds the page's map count as
 * reported by page_mapcount(), copied out with put_user().
 *
 * NOTE(review): this text is a garbled extraction - stale line
 * numbers are fused into the code, statements are split across
 * lines, and interior lines are missing (the opening brace, the
 * declarations of ppage/pfn/pcount/ret, the per-pfn copy loop, the
 * alignment-error return, the final return and the closing brace).
 * Recover the function from a pristine copy of this file rather than
 * patching this text in place.
 */
28 static ssize_t
kpagecount_read(struct file
*file
, char __user
*buf
,
29 size_t count
, loff_t
*ppos
)
/* out walks the user buffer one u64 entry at a time. */
31 u64 __user
*out
= (u64 __user
*)buf
;
/* src: starting byte offset into the pfn-indexed array. */
33 unsigned long src
= *ppos
;
/* Clamp the request so it cannot run past the last page frame. */
39 count
= min_t(size_t, count
, (max_pfn
* KPMSIZE
) - src
);
/* Both offset and length must be KPMSIZE-aligned (error path missing). */
40 if (src
& KPMMASK
|| count
& KPMMASK
)
/* Per-pfn body: translate the pfn to its struct page. */
45 ppage
= pfn_to_page(pfn
);
/* Absent/slab pages are special-cased; the value assigned for them is
 * on a missing line - presumably 0, verify against a pristine copy. */
48 if (!ppage
|| PageSlab(ppage
))
51 pcount
= page_mapcount(ppage
);
/* Copy the entry out; a fault ends the loop early. */
53 if (put_user(pcount
, out
)) {
/* Advance the file position by the bytes actually written... */
65 *ppos
+= (char __user
*)out
- buf
;
/* ...and report that byte count as the success return value. */
67 ret
= (char __user
*)out
- buf
;
71 static const struct file_operations proc_kpagecount_operations
= {
73 .read
= kpagecount_read
,
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
82 static inline u64
kpf_copy_bit(u64 kflags
, int ubit
, int kbit
)
84 return ((kflags
>> kbit
) & 1) << ubit
;
/*
 * stable_page_flags - compute the stable KPF_* flag word that
 * /proc/kpageflags exports for @page.
 *
 * Kernel-internal PG_* bits are translated to their stable
 * user-visible KPF_* positions with kpf_copy_bit(), and pseudo flags
 * (KPF_NOPAGE, KPF_COMPOUND_HEAD/TAIL, KPF_ZERO_PAGE, KPF_BALLOON,
 * ...) are synthesized from page state that has no single PG_
 * counterpart.
 *
 * NOTE(review): garbled extraction - stale line numbers are fused
 * into the code, several comment blocks have lost their opening and
 * closing markers, and many interior lines are missing (the opening
 * brace, the initialisation of the local accumulators u and k, a
 * number of if/else bodies, the #endif lines pairing with the two
 * #ifdef blocks below, the final return and the closing brace).
 * Recover from a pristine copy rather than patching this text.
 */
87 u64
stable_page_flags(struct page
*page
)
93 * pseudo flag: KPF_NOPAGE
94 * it differentiates a memory hole from a page with no flags
97 return 1 << KPF_NOPAGE
;
103 * pseudo flags for the well known (anonymous) memory mapped pages
105 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
106 * simple test in page_mapped() is not enough.
108 if (!PageSlab(page
) && page_mapped(page
))
116 * compound pages: export both head/tail info
117 * they together define a compound page's start/end pos and order
120 u
|= 1 << KPF_COMPOUND_HEAD
;
122 u
|= 1 << KPF_COMPOUND_TAIL
;
126 * PageTransCompound can be true for non-huge compound pages (slab
127 * pages or pages allocated by drivers with __GFP_COMP) because it
128 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
129 * to make sure a given page is a thp, not a non-huge compound page.
131 else if (PageTransCompound(page
)) {
132 struct page
*head
= compound_head(page
);
134 if (PageLRU(head
) || PageAnon(head
))
136 else if (is_huge_zero_page(head
)) {
137 u
|= 1 << KPF_ZERO_PAGE
;
140 } else if (is_zero_pfn(page_to_pfn(page
)))
141 u
|= 1 << KPF_ZERO_PAGE
;
145 * Caveats on high order pages: page->_refcount will only be set
146 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
147 * SLOB won't set PG_slab at all on compound pages.
151 else if (page_count(page
) == 0 && is_free_buddy_page(page
))
154 if (PageBalloon(page
))
155 u
|= 1 << KPF_BALLOON
;
157 if (page_is_idle(page
))
/* From here down: straight PG_* -> KPF_* bit translations. */
160 u
|= kpf_copy_bit(k
, KPF_LOCKED
, PG_locked
);
162 u
|= kpf_copy_bit(k
, KPF_SLAB
, PG_slab
);
/* Slab tail pages: the body of this special case is on a missing line. */
163 if (PageTail(page
) && PageSlab(compound_head(page
)))
166 u
|= kpf_copy_bit(k
, KPF_ERROR
, PG_error
);
167 u
|= kpf_copy_bit(k
, KPF_DIRTY
, PG_dirty
);
168 u
|= kpf_copy_bit(k
, KPF_UPTODATE
, PG_uptodate
);
169 u
|= kpf_copy_bit(k
, KPF_WRITEBACK
, PG_writeback
);
171 u
|= kpf_copy_bit(k
, KPF_LRU
, PG_lru
);
172 u
|= kpf_copy_bit(k
, KPF_REFERENCED
, PG_referenced
);
173 u
|= kpf_copy_bit(k
, KPF_ACTIVE
, PG_active
);
174 u
|= kpf_copy_bit(k
, KPF_RECLAIM
, PG_reclaim
);
176 u
|= kpf_copy_bit(k
, KPF_SWAPCACHE
, PG_swapcache
);
177 u
|= kpf_copy_bit(k
, KPF_SWAPBACKED
, PG_swapbacked
);
179 u
|= kpf_copy_bit(k
, KPF_UNEVICTABLE
, PG_unevictable
);
180 u
|= kpf_copy_bit(k
, KPF_MLOCKED
, PG_mlocked
);
/* NOTE(review): the #endif matching this #ifdef is missing below. */
182 #ifdef CONFIG_MEMORY_FAILURE
183 u
|= kpf_copy_bit(k
, KPF_HWPOISON
, PG_hwpoison
);
/* NOTE(review): the #endif matching this #ifdef is missing below. */
186 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
187 u
|= kpf_copy_bit(k
, KPF_UNCACHED
, PG_uncached
);
190 u
|= kpf_copy_bit(k
, KPF_RESERVED
, PG_reserved
);
191 u
|= kpf_copy_bit(k
, KPF_MAPPEDTODISK
, PG_mappedtodisk
);
192 u
|= kpf_copy_bit(k
, KPF_PRIVATE
, PG_private
);
193 u
|= kpf_copy_bit(k
, KPF_PRIVATE_2
, PG_private_2
);
194 u
|= kpf_copy_bit(k
, KPF_OWNER_PRIVATE
, PG_owner_priv_1
);
195 u
|= kpf_copy_bit(k
, KPF_ARCH
, PG_arch_1
);
/*
 * kpageflags_read - .read handler for /proc/kpageflags.
 *
 * Same pfn-indexed array layout and alignment rules as
 * kpagecount_read(), but each u64 entry is the stable_page_flags()
 * word for the corresponding page frame.
 *
 * NOTE(review): garbled extraction - stale line numbers fused into
 * the code and interior lines missing (opening brace, the
 * declarations of ppage/pfn/ret, the per-pfn loop, the alignment
 * error return, the final return and closing brace).  Recover from a
 * pristine copy rather than patching this text in place.
 */
200 static ssize_t
kpageflags_read(struct file
*file
, char __user
*buf
,
201 size_t count
, loff_t
*ppos
)
/* out walks the user buffer one u64 entry at a time. */
203 u64 __user
*out
= (u64 __user
*)buf
;
/* src: starting byte offset into the pfn-indexed array. */
205 unsigned long src
= *ppos
;
/* Clamp the request so it cannot run past the last page frame. */
210 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
/* Both offset and length must be KPMSIZE-aligned (error path missing). */
211 if (src
& KPMMASK
|| count
& KPMMASK
)
/* Per-pfn body: translate the pfn to its struct page. */
216 ppage
= pfn_to_page(pfn
);
/* Copy this frame's flag word out; a fault ends the loop early. */
220 if (put_user(stable_page_flags(ppage
), out
)) {
/* Advance the file position by the bytes actually written... */
232 *ppos
+= (char __user
*)out
- buf
;
/* ...and report that byte count as the success return value. */
234 ret
= (char __user
*)out
- buf
;
238 static const struct file_operations proc_kpageflags_operations
= {
240 .read
= kpageflags_read
,
/*
 * kpagecgroup_read - .read handler for /proc/kpagecgroup.
 *
 * Same pfn-indexed array layout and alignment rules as
 * kpagecount_read(), but each u64 entry is the inode number of the
 * memory cgroup the page is charged to, as reported by
 * page_cgroup_ino().
 *
 * NOTE(review): garbled extraction - stale line numbers fused into
 * the code and interior lines missing (opening brace, the
 * declarations of ppage/pfn/ino/ret, the per-pfn loop, error
 * returns, closing brace).  Also note the stray
 * "#endif (CONFIG_MEMCG)" after this section: the matching
 * "#ifdef CONFIG_MEMCG" that presumably guards this whole section
 * was dropped.  Recover from a pristine copy.
 */
244 static ssize_t
kpagecgroup_read(struct file
*file
, char __user
*buf
,
245 size_t count
, loff_t
*ppos
)
/* out walks the user buffer one u64 entry at a time. */
247 u64 __user
*out
= (u64 __user
*)buf
;
/* src: starting byte offset into the pfn-indexed array. */
249 unsigned long src
= *ppos
;
/* Clamp the request so it cannot run past the last page frame. */
255 count
= min_t(unsigned long, count
, (max_pfn
* KPMSIZE
) - src
);
/* Both offset and length must be KPMSIZE-aligned (error path missing). */
256 if (src
& KPMMASK
|| count
& KPMMASK
)
/* Per-pfn body: translate the pfn to its struct page. */
261 ppage
= pfn_to_page(pfn
);
/* Look up the owning memcg's inode number for this page. */
266 ino
= page_cgroup_ino(ppage
);
/* Copy the entry out; a fault ends the loop early. */
270 if (put_user(ino
, out
)) {
/* Advance the file position by the bytes actually written... */
282 *ppos
+= (char __user
*)out
- buf
;
/* ...and report that byte count as the success return value. */
284 ret
= (char __user
*)out
- buf
;
288 static const struct file_operations proc_kpagecgroup_operations
= {
290 .read
= kpagecgroup_read
,
292 #endif /* CONFIG_MEMCG */
294 static int __init
proc_page_init(void)
296 proc_create("kpagecount", S_IRUSR
, NULL
, &proc_kpagecount_operations
);
297 proc_create("kpageflags", S_IRUSR
, NULL
, &proc_kpageflags_operations
);
299 proc_create("kpagecgroup", S_IRUSR
, NULL
, &proc_kpagecgroup_operations
);
303 fs_initcall(proc_page_init
);