/* fs/f2fs/shrinker.c */
/*
 * f2fs shrinker support
 * the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
/* All mounted f2fs instances that registered with the shrinker. */
static LIST_HEAD(f2fs_list);
/* Protects f2fs_list; dropped while scanning an individual sbi. */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* Monotonic scan-pass id; lets one scan pass visit each sbi at most once. */
static unsigned int shrinker_run_no;
21 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
23 return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
26 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
28 if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
29 return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
30 return 0;
33 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
35 return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
38 unsigned long f2fs_shrink_count(struct shrinker *shrink,
39 struct shrink_control *sc)
41 struct f2fs_sb_info *sbi;
42 struct list_head *p;
43 unsigned long count = 0;
45 spin_lock(&f2fs_list_lock);
46 p = f2fs_list.next;
47 while (p != &f2fs_list) {
48 sbi = list_entry(p, struct f2fs_sb_info, s_list);
50 /* stop f2fs_put_super */
51 if (!mutex_trylock(&sbi->umount_mutex)) {
52 p = p->next;
53 continue;
55 spin_unlock(&f2fs_list_lock);
57 /* count extent cache entries */
58 count += __count_extent_cache(sbi);
60 /* shrink clean nat cache entries */
61 count += __count_nat_entries(sbi);
63 /* count free nids cache entries */
64 count += __count_free_nids(sbi);
66 spin_lock(&f2fs_list_lock);
67 p = p->next;
68 mutex_unlock(&sbi->umount_mutex);
70 spin_unlock(&f2fs_list_lock);
71 return count;
74 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
75 struct shrink_control *sc)
77 unsigned long nr = sc->nr_to_scan;
78 struct f2fs_sb_info *sbi;
79 struct list_head *p;
80 unsigned int run_no;
81 unsigned long freed = 0;
83 spin_lock(&f2fs_list_lock);
84 do {
85 run_no = ++shrinker_run_no;
86 } while (run_no == 0);
87 p = f2fs_list.next;
88 while (p != &f2fs_list) {
89 sbi = list_entry(p, struct f2fs_sb_info, s_list);
91 if (sbi->shrinker_run_no == run_no)
92 break;
94 /* stop f2fs_put_super */
95 if (!mutex_trylock(&sbi->umount_mutex)) {
96 p = p->next;
97 continue;
99 spin_unlock(&f2fs_list_lock);
101 sbi->shrinker_run_no = run_no;
103 /* shrink extent cache entries */
104 freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
106 /* shrink clean nat cache entries */
107 if (freed < nr)
108 freed += try_to_free_nats(sbi, nr - freed);
110 /* shrink free nids cache entries */
111 if (freed < nr)
112 freed += try_to_free_nids(sbi, nr - freed);
114 spin_lock(&f2fs_list_lock);
115 p = p->next;
116 list_move_tail(&sbi->s_list, &f2fs_list);
117 mutex_unlock(&sbi->umount_mutex);
118 if (freed >= nr)
119 break;
121 spin_unlock(&f2fs_list_lock);
122 return freed;
125 void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
127 spin_lock(&f2fs_list_lock);
128 list_add_tail(&sbi->s_list, &f2fs_list);
129 spin_unlock(&f2fs_list_lock);
132 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
134 f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
136 spin_lock(&f2fs_list_lock);
137 list_del(&sbi->s_list);
138 spin_unlock(&f2fs_list_lock);