#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
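
/*
 * For illustration: with both CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM set,
 * FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * so each FOR_ALL_ZONES() user below gets one counter per configured
 * zone type, in zone order.
 */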

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
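
/*
 * Usage sketch (illustrative, not a definition in this header): a caller
 * that has already disabled preemption or interrupts uses the __ variant,
 * e.g.
 *
 *	__count_vm_event(PGPGIN);
 *
 * while count_vm_event()/count_vm_events() pin the current CPU themselves
 * via get_cpu_var()/put_cpu() and so may be called from preemptible
 * context.
 */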

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e,d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e,d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
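
/*
 * Example (a sketch of typical use; callers live elsewhere in mm/): the
 * page allocator can account an order-N allocation against the zone it
 * came from with
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * This works because FOR_ALL_ZONES() lays the per zone items out in zone
 * order, so item##_DMA + zone_idx(zone) selects the matching counter.
 */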

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
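
/*
 * Note on the readers below: with per cpu differentials (SMP) the folded
 * sums can be transiently negative until the deltas are merged back, so
 * negative values are clamped to zero before being returned.
 */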

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
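
/*
 * Example (sketch): total pagecache pages on the local node, assuming the
 * NR_FILE_PAGES zone_stat_item from this kernel:
 *
 *	unsigned long pages = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */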

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i,-(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
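
/*
 * Illustrative (a sketch, not a definition here): a caller crediting a
 * zone with eight new pagecache pages could write
 *
 *	add_zone_page_state(zone, NR_FILE_PAGES, 8);
 *
 * which is simply mod_zone_page_state() with a positive delta.
 */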

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */

static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */