// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */
#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>
static bool track_protection(struct page_counter *c)
{
	return c->protection_support;
}
/*
 * Track how much of @c's usage is covered by its min and low settings,
 * and propagate changes in that protected amount to the parent's
 * children_min_usage and children_low_usage aggregates.
 */
static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	long delta;

	if (!c->parent)
		return;

	protected = min(usage, READ_ONCE(c->min));
	old_protected = atomic_long_read(&c->min_usage);
	if (protected != old_protected) {
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	protected = min(usage, READ_ONCE(c->low));
	old_protected = atomic_long_read(&c->low_usage);
	if (protected != old_protected) {
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}
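/*
 * Illustration of the propagation above (hypothetical numbers, in
 * pages): a child with min=100 and usage=60 records min_usage=60 and
 * contributes 60 to its parent's children_min_usage. If its usage
 * later grows to 120, the xchg bumps min_usage to min(120, 100) = 100
 * and the delta of 40 is added to the parent, so the parent aggregate
 * always reflects min(usage, min) summed over its children.
 */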
/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	/* More uncharges than charges? */
	if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
		      new, nr_pages)) {
		new = 0;
		atomic_long_set(&counter->usage, new);
	}
	if (track_protection(counter))
		propagate_protected_usage(counter, new);
}
/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		if (protection)
			propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 *
		 * Notably, we have two watermarks to allow for both a globally
		 * visible peak and one that can be reset at a smaller scope.
		 *
		 * Since we reset both watermarks when the global reset occurs,
		 * we can guarantee that watermark >= local_watermark, so we
		 * don't need to do both comparisons every time.
		 *
		 * On systems with branch predictors, the inner condition should
		 * be almost free.
		 */
		if (new > READ_ONCE(c->local_watermark)) {
			WRITE_ONCE(c->local_watermark, new);
			if (new > READ_ONCE(c->watermark))
				WRITE_ONCE(c->watermark, new);
		}
	}
}
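/*
 * Sketch of the two-watermark behaviour described above (hypothetical
 * numbers; the reset paths live outside this function): after charges
 * peak at 900 pages, both watermark and local_watermark read 900. A
 * smaller-scope reset writes the current usage back to local_watermark
 * only, leaving watermark at 900; a later charge to 950 then raises
 * local_watermark and watermark in turn, preserving
 * watermark >= local_watermark throughout.
 */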
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS. If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit. When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt which is only used
			 * to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		if (protection)
			propagate_protected_usage(c, new);

		/* see comment on page_counter_charge */
		if (new > READ_ONCE(c->local_watermark)) {
			WRITE_ONCE(c->local_watermark, new);
			if (new > READ_ONCE(c->watermark))
				WRITE_ONCE(c->watermark, new);
		}
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
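/*
 * Typical caller pattern (a sketch only; the counter name and charge
 * size are hypothetical):
 *
 *	struct page_counter *fail;
 *
 *	if (!page_counter_try_charge(&memcg_counter, 1, &fail)) {
 *		// @fail points at the ancestor that hit its limit;
 *		// reclaim against it or report the failure.
 *	} else {
 *		// ... use the memory, and when done:
 *		page_counter_uncharge(&memcg_counter, 1);
 *	}
 */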
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}
/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage || nr_pages >= old)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
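/*
 * Usage sketch for page_counter_memparse() (buffer contents are
 * illustrative only):
 *
 *	unsigned long nr_pages;
 *
 *	page_counter_memparse("512M", "max", &nr_pages);
 *		=> 0, nr_pages == (512 << 20) / PAGE_SIZE
 *	page_counter_memparse("max", "max", &nr_pages);
 *		=> 0, nr_pages == PAGE_COUNTER_MAX
 *	page_counter_memparse("512MB", "max", &nr_pages);
 *		=> -EINVAL (trailing characters after the size)
 */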
#ifdef CONFIG_MEMCG
/*
 * This function calculates an individual page counter's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one counter claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A counter's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual counter's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each counter's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees. An illustrative numerical example follows
 * effective_protection() below.
 */
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected,
					  bool recursive_protection)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection than what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
	 *
	 * Check both usage and parent_usage against the respective
	 * protected values. One should imply the other, but they
	 * aren't read atomically - make sure the division is sane.
	 */
	if (!recursive_protection)
		return ep;

	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}
/**
 * page_counter_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @counter: the page_counter to update
 * @recursive_protection: Whether to use memory_recursiveprot behavior.
 *
 * Calculates elow/emin thresholds for given page_counter.
 *
 * WARNING: This function is not stateless! It can only be used as part
 *          of a top-down tree iteration, not for isolated queries.
 */
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection)
{
	unsigned long usage, parent_usage;
	struct page_counter *parent = counter->parent;

	/*
	 * Effective values of the reclaim targets are ignored so they
	 * can be stale. Have a look at mem_cgroup_protection for more
	 * details.
	 * TODO: calculation should be more robust so that we do not need
	 * that special casing.
	 */
	if (root == counter)
		return;

	usage = page_counter_read(counter);
	if (!usage)
		return;

	if (parent == root) {
		counter->emin = READ_ONCE(counter->min);
		counter->elow = READ_ONCE(counter->low);
		return;
	}

	parent_usage = page_counter_read(parent);

	WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
			READ_ONCE(counter->min),
			READ_ONCE(parent->emin),
			atomic_long_read(&parent->children_min_usage),
			recursive_protection));

	WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
			READ_ONCE(counter->low),
			READ_ONCE(parent->elow),
			atomic_long_read(&parent->children_low_usage),
			recursive_protection));
}
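/*
 * Note on the intended calling pattern (a sketch, not taken from this
 * file): callers walk the tree top-down from @root and invoke
 * page_counter_calculate_protection() on each counter before any of
 * its children, so that parent->emin and parent->elow are already
 * current when a child's effective_protection() reads them. Isolated,
 * out-of-order queries would consume stale parent values, which is
 * what the WARNING above refers to.
 */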
#endif /* CONFIG_MEMCG */