/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches. This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock. We do not reuse the returned slots directly but
 * move them back to the global pool in a batch. This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * Each swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in its swap_map count, which prevents it from being
 * allocated again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because, when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && \
		swap_slot_cache_enabled && swap_slot_cache_initialized)

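/*
 * Flags passed to __drain_swap_slots_cache() and drain_slots_cache_cpu()
 * selecting which per-cpu array to drain: SLOTS_CACHE is the allocation
 * cache (cache->slots), SLOTS_CACHE_RET is the return cache
 * (cache->slots_ret).
 */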
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

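/*
 * Note: swap_slots_cache_enable_mutex is intentionally left held by
 * disable_swap_slots_cache_lock(); the matching unlock happens in
 * reenable_swap_slots_cache_unlock().
 */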
/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

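/* Re-enable the cache only if there is still usable swap space. */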
static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

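/*
 * Keep the cache active only while free swap is plentiful: reactivate when
 * free swap pages exceed num_online_cpus() * THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE,
 * and deactivate (draining cached slots back to the global pool) when they
 * drop below num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE.
 */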
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

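/*
 * CPU hotplug "online" callback (registered in enable_swap_slots_cache()):
 * allocate this cpu's slots and slots_ret arrays and initialize its locks.
 */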
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as vzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE);
	if (!slots)
		return -ENOMEM;

	slots_ret = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE);
	if (!slots_ret) {
		vfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		/* cache already allocated */
		goto out;
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	cache->slots = slots;
	slots = NULL;
	cache->slots_ret = slots_ret;
	slots_ret = NULL;
out:
	mutex_unlock(&swap_slots_cache_mutex);
	if (slots)
		vfree(slots);
	if (slots_ret)
		vfree(slots_ret);
	return 0;
}

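/*
 * Flush one cpu's cached entries back to the global pool; if free_slots
 * is true, also free the backing slots/slots_ret arrays themselves.
 */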
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			vfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			vfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of the swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could miss a cpu
	 * that is being brought online but is not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

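/* CPU hotplug teardown callback: drain and free this cpu's caches. */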
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

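/*
 * Register the per-cpu cache hotplug callbacks on first use; on later
 * calls, just re-enable the existing caches if usable swap is present.
 */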
int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (ret < 0)
		goto out_unlock;

	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return 0;
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, cache->slots);

	return cache->nr;
}

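/*
 * Return a swap entry to the per-cpu return cache.  Entries are batched
 * in slots_ret and handed back to the global pool with
 * swapcache_free_entries() once the cache fills up; if the cache is
 * inactive, the entry is freed directly.
 */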
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = &get_cpu_var(swp_slots);
	if (use_swap_slot_cache && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}
	put_cpu_var(swp_slots);

	return 0;
}

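/*
 * Allocate one swap slot: take it from this cpu's cache when the cache
 * is active (refilling the cache from the global pool as needed),
 * otherwise fall back to a direct get_swap_pages(1, &entry) call.
 */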
swp_entry_t get_swap_page(void)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	entry.val = 0;
	if (check_cache_active()) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, &entry);

	return entry;
}

#endif /* CONFIG_SWAP */