/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/limits.h>
#include <asm/page.h>

struct page_counter {
	/*
	 * Make sure 'usage' does not share cacheline with any other field. The
	 * memcg->memory.usage is a hot member of struct mem_cgroup.
	 */
	atomic_long_t usage;
	CACHELINE_PADDING(_pad1_);

	/* effective memory.min and memory.min usage tracking */
	unsigned long emin;
	atomic_long_t min_usage;
	atomic_long_t children_min_usage;

	/* effective memory.low and memory.low usage tracking */
	unsigned long elow;
	atomic_long_t low_usage;
	atomic_long_t children_low_usage;

	unsigned long watermark;
	/* Latest cg2 reset watermark */
	unsigned long local_watermark;
	unsigned long failcnt;

	/* Keep all the read-mostly fields in a separate cacheline. */
	CACHELINE_PADDING(_pad2_);

	bool protection_support;
	unsigned long min;
	unsigned long low;
	unsigned long high;
	unsigned long max;
	struct page_counter *parent;
} ____cacheline_internodealigned_in_smp;
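
/*
 * Usage sketch (illustrative, not part of the original header): charges
 * propagate up the parent chain, and a failed try-charge reports the
 * limiting counter through @fail:
 *
 *	struct page_counter parent, child, *fail;
 *
 *	page_counter_init(&parent, NULL, true);
 *	page_counter_init(&child, &parent, true);
 *	if (!page_counter_try_charge(&child, 1, &fail))
 *		pr_warn("limit hit at counter %p\n", fail);
 */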

#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
#else
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
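
/*
 * Note (editorial): limits are kept in pages; on 64-bit the cap of
 * LONG_MAX / PAGE_SIZE keeps the byte equivalent (nr_pages * PAGE_SIZE)
 * representable in a long.
 */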

/*
 * Protection is supported only for the first counter (with id 0).
 */
static inline void page_counter_init(struct page_counter *counter,
				     struct page_counter *parent,
				     bool protection_support)
{
	counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
	counter->max = PAGE_COUNTER_MAX;
	counter->parent = parent;
	counter->protection_support = protection_support;
}
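
/* Return the counter's current number of charged pages (lockless read). */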
static inline unsigned long page_counter_read(struct page_counter *counter)
{
	return atomic_long_read(&counter->usage);
}
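
/*
 * Charging and uncharging apply hierarchically: they affect @counter and
 * every ancestor. page_counter_try_charge() returns false and reverts any
 * partial charge when a limit would be exceeded, pointing @fail at the
 * counter that hit its limit.
 */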
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
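
/*
 * The high limit is read locklessly on the charge path, so it is updated
 * with WRITE_ONCE() to pair with READ_ONCE() on the reader side.
 */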
static inline void page_counter_set_high(struct page_counter *counter,
					 unsigned long nr_pages)
{
	WRITE_ONCE(counter->high, nr_pages);
}

int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages);
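
/* Reset both watermarks to the current usage. */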
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
	unsigned long usage = page_counter_read(counter);

	/*
	 * Update local_watermark first, so it's always <= watermark
	 * (modulo CPU/compiler re-ordering)
	 */
	counter->local_watermark = usage;
	counter->watermark = usage;
}
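
/*
 * Computes the effective memory.min and memory.low protection (emin/elow)
 * for @counter within the subtree rooted at @root.
 */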
#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection);
#else
static inline void page_counter_calculate_protection(struct page_counter *root,
						     struct page_counter *counter,
						     bool recursive_protection) {}
#endif

#endif /* _LINUX_PAGE_COUNTER_H */