2 * The order of these masks is important. Matching masks will be seen
3 * first and the left over flags will end up showing by themselves.
* For example, if we have GFP_KERNEL before GFP_USER we will get:
7 * GFP_KERNEL|GFP_HARDWALL
9 * Thus most bits set go first.
/*
 * gfp_t value -> name table in the {mask, string} form consumed by
 * __print_flags().  Composite GFP_* masks (many bits) precede the
 * individual __GFP_* bits (one bit each) so that the widest matching
 * mask is reported and only the leftover bits print individually.
 */
#define __def_gfpflag_names \
{(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
{(unsigned long)GFP_USER, "GFP_USER"}, \
{(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
{(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
{(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
{(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
{(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
{(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
{(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
{(unsigned long)GFP_DMA, "GFP_DMA"}, \
{(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
{(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
{(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
{(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
{(unsigned long)__GFP_IO, "__GFP_IO"}, \
{(unsigned long)__GFP_FS, "__GFP_FS"}, \
{(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
{(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \
{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
{(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
{(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
{(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
{(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
{(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
{(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
{(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
{(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
{(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
{(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
/*
 * Decode a gfp_mask into a "FLAG|FLAG|..." string for trace output via
 * __print_flags(); a zero mask is special-cased by the ?: expression.
 */
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
/*
 * Conditional page-flag table entries: when the governing config option
 * is enabled, each IF_HAVE_PG_* macro expands to a ",{1UL << flag,
 * string}" entry (note the LEADING comma); otherwise it expands to
 * nothing.  This lets optional flags be appended after the last
 * unconditional entry of __def_pageflag_names without a dangling comma.
 */
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#define IF_HAVE_PG_MLOCK(flag,string)
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#define IF_HAVE_PG_UNCACHED(flag,string)
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#define IF_HAVE_PG_HWPOISON(flag,string)
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#define IF_HAVE_PG_IDLE(flag,string)
/*
 * struct page flag bit -> name table for __print_flags(): one entry per
 * unconditional PG_* bit, with config-dependent bits appended through
 * the IF_HAVE_PG_* helpers (which supply their own leading comma).
 */
#define __def_pageflag_names \
{1UL << PG_locked, "locked" }, \
{1UL << PG_error, "error" }, \
{1UL << PG_referenced, "referenced" }, \
{1UL << PG_uptodate, "uptodate" }, \
{1UL << PG_dirty, "dirty" }, \
{1UL << PG_lru, "lru" }, \
{1UL << PG_active, "active" }, \
{1UL << PG_slab, "slab" }, \
{1UL << PG_owner_priv_1, "owner_priv_1" }, \
{1UL << PG_arch_1, "arch_1" }, \
{1UL << PG_reserved, "reserved" }, \
{1UL << PG_private, "private" }, \
{1UL << PG_private_2, "private_2" }, \
{1UL << PG_writeback, "writeback" }, \
{1UL << PG_head, "head" }, \
{1UL << PG_swapcache, "swapcache" }, \
{1UL << PG_mappedtodisk, "mappedtodisk" }, \
{1UL << PG_reclaim, "reclaim" }, \
{1UL << PG_swapbacked, "swapbacked" }, \
{1UL << PG_unevictable, "unevictable" } \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" )
/*
 * Decode page->flags into a "flag|flag|..." string for trace output
 * using the __def_pageflag_names table above.
 */
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
/*
 * Per-architecture names for the overloaded VM_ARCH_1 / VM_ARCH_2 vma
 * flag bits: exactly one definition of each __VM_ARCH_SPECIFIC_* macro
 * is selected by the config chain, falling back to the generic
 * "arch_1"/"arch_2" labels when no arch-specific meaning applies.
 */
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" }
#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" }
/*
 * Unlike the IF_HAVE_PG_* helpers (leading comma, used at table end),
 * this expands with a TRAILING comma because it is used mid-table in
 * __def_vmaflag_names, between comma-terminated entries.
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
/*
 * vma->vm_flags bit -> name table for __print_flags().  VM_* macros are
 * already single-bit masks, so entries use the flag value directly
 * (no 1UL << shift, unlike the PG_* table).  Arch-overloaded bits are
 * supplied via __VM_ARCH_SPECIFIC_1/2 and the optional soft-dirty entry
 * via IF_HAVE_VM_SOFTDIRTY, which carries its own trailing comma.
 */
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
{VM_EXEC, "exec" }, \
{VM_SHARED, "shared" }, \
{VM_MAYREAD, "mayread" }, \
{VM_MAYWRITE, "maywrite" }, \
{VM_MAYEXEC, "mayexec" }, \
{VM_MAYSHARE, "mayshare" }, \
{VM_GROWSDOWN, "growsdown" }, \
{VM_UFFD_MISSING, "uffd_missing" }, \
{VM_PFNMAP, "pfnmap" }, \
{VM_DENYWRITE, "denywrite" }, \
{VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_SEQ_READ, "seqread" }, \
{VM_RAND_READ, "randread" }, \
{VM_DONTCOPY, "dontcopy" }, \
{VM_DONTEXPAND, "dontexpand" }, \
{VM_LOCKONFAULT, "lockonfault" }, \
{VM_ACCOUNT, "account" }, \
{VM_NORESERVE, "noreserve" }, \
{VM_HUGETLB, "hugetlb" }, \
__VM_ARCH_SPECIFIC_1 , \
__VM_ARCH_SPECIFIC_2 , \
{VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
{VM_HUGEPAGE, "hugepage" }, \
{VM_NOHUGEPAGE, "nohugepage" }, \
{VM_MERGEABLE, "mergeable" } \
/*
 * Decode vma->vm_flags into a "flag|flag|..." string for trace output
 * using the __def_vmaflag_names table above.
 */
#define show_vma_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_vmaflag_names \