/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *  GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */

#define __def_gfpflag_names \
        {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
        {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
        {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
        {(unsigned long)GFP_USER, "GFP_USER"}, \
        {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
        {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
        {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
        {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
        {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
        {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
        {(unsigned long)GFP_DMA, "GFP_DMA"}, \
        {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
        {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
        {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
        {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
        {(unsigned long)__GFP_IO, "__GFP_IO"}, \
        {(unsigned long)__GFP_FS, "__GFP_FS"}, \
        {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
        {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
        {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
        {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
        {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
        {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
        {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
        {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
        {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
        {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
        {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
        {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
        {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
        {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
        {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
        {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
        {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}

#define show_gfp_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_gfpflag_names \
        ) : "none"
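
/*
 * Illustrative usage only (not part of the original definitions): a trace
 * event would typically pass a gfp mask through show_gfp_flags() in its
 * TP_printk() format, roughly like
 *
 *        TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * so that a raw value such as GFP_KERNEL | __GFP_ZERO is rendered as
 * "GFP_KERNEL|__GFP_ZERO". Because composite masks such as GFP_KERNEL are
 * listed before their component bits, __print_flags() consumes the largest
 * matching mask first and only the leftover flags show by themselves.
 * The field name __entry->gfp_flags is hypothetical.
 */
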
#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif

#ifdef CONFIG_64BIT
#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_ARCH_2(flag,string)
#endif

#define __def_pageflag_names \
        {1UL << PG_locked, "locked"}, \
        {1UL << PG_waiters, "waiters"}, \
        {1UL << PG_error, "error"}, \
        {1UL << PG_referenced, "referenced"}, \
        {1UL << PG_uptodate, "uptodate"}, \
        {1UL << PG_dirty, "dirty"}, \
        {1UL << PG_lru, "lru"}, \
        {1UL << PG_active, "active"}, \
        {1UL << PG_workingset, "workingset"}, \
        {1UL << PG_slab, "slab"}, \
        {1UL << PG_owner_priv_1, "owner_priv_1"}, \
        {1UL << PG_arch_1, "arch_1"}, \
        {1UL << PG_reserved, "reserved"}, \
        {1UL << PG_private, "private"}, \
        {1UL << PG_private_2, "private_2"}, \
        {1UL << PG_writeback, "writeback"}, \
        {1UL << PG_head, "head"}, \
        {1UL << PG_mappedtodisk, "mappedtodisk"}, \
        {1UL << PG_reclaim, "reclaim"}, \
        {1UL << PG_swapbacked, "swapbacked"}, \
        {1UL << PG_unevictable, "unevictable"} \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked") \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached") \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison") \
IF_HAVE_PG_IDLE(PG_young, "young") \
IF_HAVE_PG_IDLE(PG_idle, "idle") \
IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2")

#define show_page_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_pageflag_names \
        ) : "none"
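
/*
 * Illustrative usage only: unlike the gfp table, these entries are single
 * page-flag bits (1UL << PG_*), so ordering is irrelevant and each set bit
 * prints on its own. A trace event could decode them with something like
 *
 *        TP_printk("page_flags=%s", show_page_flags(__entry->flags))
 *
 * where __entry->flags is a hypothetical field holding the page-flag bits.
 */
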
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat"}
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao"}
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup"}
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY, "mappedcopy"}
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1"}
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name},
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#define __def_vmaflag_names \
        {VM_READ, "read"}, \
        {VM_WRITE, "write"}, \
        {VM_EXEC, "exec"}, \
        {VM_SHARED, "shared"}, \
        {VM_MAYREAD, "mayread"}, \
        {VM_MAYWRITE, "maywrite"}, \
        {VM_MAYEXEC, "mayexec"}, \
        {VM_MAYSHARE, "mayshare"}, \
        {VM_GROWSDOWN, "growsdown"}, \
        {VM_UFFD_MISSING, "uffd_missing"}, \
        {VM_PFNMAP, "pfnmap"}, \
        {VM_DENYWRITE, "denywrite"}, \
        {VM_UFFD_WP, "uffd_wp"}, \
        {VM_LOCKED, "locked"}, \
        {VM_IO, "io"}, \
        {VM_SEQ_READ, "seqread"}, \
        {VM_RAND_READ, "randread"}, \
        {VM_DONTCOPY, "dontcopy"}, \
        {VM_DONTEXPAND, "dontexpand"}, \
        {VM_LOCKONFAULT, "lockonfault"}, \
        {VM_ACCOUNT, "account"}, \
        {VM_NORESERVE, "noreserve"}, \
        {VM_HUGETLB, "hugetlb"}, \
        {VM_SYNC, "sync"}, \
        __VM_ARCH_SPECIFIC_1, \
        {VM_WIPEONFORK, "wipeonfork"}, \
        {VM_DONTDUMP, "dontdump"}, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty") \
        {VM_MIXEDMAP, "mixedmap"}, \
        {VM_HUGEPAGE, "hugepage"}, \
        {VM_NOHUGEPAGE, "nohugepage"}, \
        {VM_MERGEABLE, "mergeable"}

#define show_vma_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_vmaflag_names \
        ) : "none"

#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS \
        EM( COMPACT_SKIPPED, "skipped") \
        EM( COMPACT_DEFERRED, "deferred") \
        EM( COMPACT_CONTINUE, "continue") \
        EM( COMPACT_SUCCESS, "success") \
        EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
        EM( COMPACT_COMPLETE, "complete") \
        EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
        EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
        EMe(COMPACT_CONTENDED, "contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED 1
#define COMPACTION_WITHDRAWN 2
#define COMPACTION_PROGRESS 3

#define compact_result_to_feedback(result) \
({ \
        enum compact_result __result = result; \
        (compaction_failed(__result)) ? COMPACTION_FAILED : \
                (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})

#define COMPACTION_FEEDBACK \
        EM(COMPACTION_FAILED, "failed") \
        EM(COMPACTION_WITHDRAWN, "withdrawn") \
        EMe(COMPACTION_PROGRESS, "progress")
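
/*
 * Illustrative only: a compaction trace event could reduce a raw
 * compact_result to the coarse feedback values above and print it
 * symbolically, for example
 *
 *        __print_symbolic(compact_result_to_feedback(__entry->ret),
 *                         COMPACTION_FEEDBACK)
 *
 * which would emit "failed", "withdrawn" or "progress". The field name
 * __entry->ret is hypothetical.
 */
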
#define COMPACTION_PRIORITY \
        EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
        EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
        EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE \
        IFDEF_ZONE_DMA(EM(ZONE_DMA, "DMA")) \
        IFDEF_ZONE_DMA32(EM(ZONE_DMA32, "DMA32")) \
        EM(ZONE_NORMAL, "Normal") \
        IFDEF_ZONE_HIGHMEM(EM(ZONE_HIGHMEM, "HighMem")) \
        EMe(ZONE_MOVABLE, "Movable")

#define LRU_NAMES \
        EM(LRU_INACTIVE_ANON, "inactive_anon") \
        EM(LRU_ACTIVE_ANON, "active_anon") \
        EM(LRU_INACTIVE_FILE, "inactive_file") \
        EM(LRU_ACTIVE_FILE, "active_file") \
        EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define EMe(a, b) TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK entries are defines, not enums. Not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b) {a, b},
#define EMe(a, b) {a, b}
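
/*
 * Illustrative only (not part of the original file): once EM()/EMe() expand
 * to {value, string} pairs, a trace-event header that includes this file can
 * print the enum-based lists symbolically, e.g.
 *
 *        TP_printk("zone=%s lru=%s",
 *                  __print_symbolic(__entry->zone_idx, ZONE_TYPE),
 *                  __print_symbolic(__entry->lru, LRU_NAMES))
 *
 * while the earlier TRACE_DEFINE_ENUM() pass exposes the raw enum values to
 * userspace tools that parse the trace format. The field names
 * __entry->zone_idx and __entry->lru are hypothetical.
 */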