Revert "net: ipv4: ip_forward: fix inverted local_df test"
[linux/fpc-iii.git] / mm / thrash.c
blob57ad495dbd54392ff5d1d23125f38f605e4d2763
1 /*
2 * mm/thrash.c
4 * Copyright (C) 2004, Red Hat, Inc.
5 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
6 * Released under the GPL, see the file COPYING for details.
8 * Simple token based thrashing protection, using the algorithm
9 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
11 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
12 * Improved algorithm to pass token:
13 * Each task has a priority which is incremented if it contended
14 * for the token in an interval less than its previous attempt.
15 * If the token is acquired, that task's priority is boosted to prevent
16 * the token from bouncing around too often and to let the task make
17 * some progress in its execution.
20 #include <linux/jiffies.h>
21 #include <linux/mm.h>
22 #include <linux/sched.h>
23 #include <linux/swap.h>
24 #include <linux/memcontrol.h>
26 #include <trace/events/vmscan.h>
/* Number of global faults between forced priority-aging of the token owner. */
#define TOKEN_AGING_INTERVAL	(0xFF)

/* Protects swap_token_mm and swap_token_memcg. */
static DEFINE_SPINLOCK(swap_token_lock);
/* The mm_struct currently holding the swap token, or NULL if unowned. */
struct mm_struct *swap_token_mm;
/* memcg of the token owner, used only for pointer-identity matching. */
static struct mem_cgroup *swap_token_memcg;
34 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
35 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
37 struct mem_cgroup *memcg;
39 memcg = try_get_mem_cgroup_from_mm(mm);
40 if (memcg)
41 css_put(mem_cgroup_css(memcg));
43 return memcg;
45 #else
46 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
48 return NULL;
50 #endif
52 void grab_swap_token(struct mm_struct *mm)
54 int current_interval;
55 unsigned int old_prio = mm->token_priority;
56 static unsigned int global_faults;
57 static unsigned int last_aging;
59 global_faults++;
61 current_interval = global_faults - mm->faultstamp;
63 if (!spin_trylock(&swap_token_lock))
64 return;
66 /* First come first served */
67 if (!swap_token_mm)
68 goto replace_token;
71 * Usually, we don't need priority aging because long interval faults
72 * makes priority decrease quickly. But there is one exception. If the
73 * token owner task is sleeping, it never make long interval faults.
74 * Thus, we need a priority aging mechanism instead. The requirements
75 * of priority aging are
76 * 1) An aging interval is reasonable enough long. Too short aging
77 * interval makes quick swap token lost and decrease performance.
78 * 2) The swap token owner task have to get priority aging even if
79 * it's under sleep.
81 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
82 swap_token_mm->token_priority /= 2;
83 last_aging = global_faults;
86 if (mm == swap_token_mm) {
87 mm->token_priority += 2;
88 goto update_priority;
91 if (current_interval < mm->last_interval)
92 mm->token_priority++;
93 else {
94 if (likely(mm->token_priority > 0))
95 mm->token_priority--;
98 /* Check if we deserve the token */
99 if (mm->token_priority > swap_token_mm->token_priority)
100 goto replace_token;
102 update_priority:
103 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
105 out:
106 mm->faultstamp = global_faults;
107 mm->last_interval = current_interval;
108 spin_unlock(&swap_token_lock);
109 return;
111 replace_token:
112 mm->token_priority += 2;
113 trace_replace_swap_token(swap_token_mm, mm);
114 swap_token_mm = mm;
115 swap_token_memcg = swap_token_memcg_from_mm(mm);
116 last_aging = global_faults;
117 goto out;
120 /* Called on process exit. */
121 void __put_swap_token(struct mm_struct *mm)
123 spin_lock(&swap_token_lock);
124 if (likely(mm == swap_token_mm)) {
125 trace_put_swap_token(swap_token_mm);
126 swap_token_mm = NULL;
127 swap_token_memcg = NULL;
129 spin_unlock(&swap_token_lock);
/*
 * Pointer-identity match between two memcg cookies.  NULL acts as a
 * wildcard on either side (global reclaim, or no recorded owner memcg).
 */
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	return !a || !b || a == b;
}
143 void disable_swap_token(struct mem_cgroup *memcg)
145 /* memcg reclaim don't disable unrelated mm token. */
146 if (match_memcg(memcg, swap_token_memcg)) {
147 spin_lock(&swap_token_lock);
148 if (match_memcg(memcg, swap_token_memcg)) {
149 trace_disable_swap_token(swap_token_mm);
150 swap_token_mm = NULL;
151 swap_token_memcg = NULL;
153 spin_unlock(&swap_token_lock);