drivers/md/bcache/stats.c
/*
 * bcache stats code
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "stats.h"
#include "btree.h"
#include "sysfs.h"

/*
 * We keep absolute totals of various statistics, and additionally a set of
 * three rolling averages.
 *
 * Every so often, a timer goes off and rescales the rolling averages.
 * DAY_RESCALE, HOUR_RESCALE and FIVE_MINUTE_RESCALE are how many times the
 * timer has to go off before we rescale each set of numbers; that gets us
 * half lives of 5 minutes, one hour, and one day.
 *
 * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
 * and accounting_weight is what we use to rescale:
 *
 * pow(31 / 32, 22) ~= 1/2
 *
 * So that we don't have to increment each set of numbers every time we (say)
 * get a cache hit, we increment a single atomic_t in acc->collector, and when
 * the rescale function runs it resets the atomic counter to 0 and adds its
 * old value to each of the exported numbers.
 *
 * To reduce rounding error, the numbers in struct cache_stats are all
 * stored left shifted by 16, and scaled back in the sysfs show() function.
 */

static const unsigned DAY_RESCALE         = 288;
static const unsigned HOUR_RESCALE        = 12;
static const unsigned FIVE_MINUTE_RESCALE = 1;
static const unsigned accounting_delay    = (HZ * 300) / 22;
static const unsigned accounting_weight   = 32;
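
/*
 * Working through the numbers above: the timer fires every
 * (HZ * 300) / 22 jiffies, i.e. 22 times per 5 minutes (roughly 13.6s apart).
 * Each rescale multiplies a stat by (accounting_weight - 1) / accounting_weight
 * = 31/32, and (31/32)^22 ~= 0.5, so 22 rescales halve a counter.
 * FIVE_MINUTE_RESCALE = 1 rescales on every timer tick (half life ~5 minutes),
 * HOUR_RESCALE = 12 rescales on every 12th tick (22 * 12 ticks ~= 1 hour),
 * and DAY_RESCALE = 288 rescales on every 288th tick (22 * 288 ticks ~= 1 day).
 */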

/* sysfs reading/writing */

read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
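
/*
 * read_attribute(), SHOW(), STORE(), var_print() and the sysfs_*print()
 * helpers are macros from bcache's local sysfs.h (assumed here, not shown):
 * SHOW(bch_stats) and STORE(bch_stats) expand to the show/store callbacks
 * backing the bch_stats ktype declared below via KTYPE(bch_stats).
 */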

SHOW(bch_stats)
{
        struct cache_stats *s =
                container_of(kobj, struct cache_stats, kobj);
#define var(stat)       (s->stat >> 16)
        var_print(cache_hits);
        var_print(cache_misses);
        var_print(cache_bypass_hits);
        var_print(cache_bypass_misses);

        sysfs_print(cache_hit_ratio,
                    DIV_SAFE(var(cache_hits) * 100,
                             var(cache_hits) + var(cache_misses)));

        var_print(cache_readaheads);
        var_print(cache_miss_collisions);
        /* sectors_bypassed counts 512-byte sectors; << 9 converts to bytes */
        sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
        return 0;
}

STORE(bch_stats)
{
        /* Nothing is writable here; writes are accepted and ignored */
        return size;
}

static void bch_stats_release(struct kobject *k)
{
}

static struct attribute *bch_stats_files[] = {
        &sysfs_cache_hits,
        &sysfs_cache_misses,
        &sysfs_cache_bypass_hits,
        &sysfs_cache_bypass_misses,
        &sysfs_cache_hit_ratio,
        &sysfs_cache_readaheads,
        &sysfs_cache_miss_collisions,
        &sysfs_bypassed,
        NULL
};
static KTYPE(bch_stats);

int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
                                   struct kobject *parent)
{
        int ret = kobject_add(&acc->total.kobj, parent,
                              "stats_total");
        ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
                                 "stats_five_minute");
        ret = ret ?: kobject_add(&acc->hour.kobj, parent,
                                 "stats_hour");
        ret = ret ?: kobject_add(&acc->day.kobj, parent,
                                 "stats_day");
        return ret;
}

void bch_cache_accounting_clear(struct cache_accounting *acc)
{
        /* Zero the seven stat counters, starting at cache_hits */
        memset(&acc->total.cache_hits,
               0,
               sizeof(unsigned long) * 7);
}

void bch_cache_accounting_destroy(struct cache_accounting *acc)
{
        kobject_put(&acc->total.kobj);
        kobject_put(&acc->five_minute.kobj);
        kobject_put(&acc->hour.kobj);
        kobject_put(&acc->day.kobj);

        atomic_set(&acc->closing, 1);
        if (del_timer_sync(&acc->timer))
                closure_return(&acc->cl);
}

/* EWMA scaling */
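
/*
 * scale_stat() decays a counter toward zero.  Assuming the ewma_add()
 * helper from bcache's util.h, ewma_add(*stat, 0, accounting_weight, 0)
 * multiplies *stat by (accounting_weight - 1) / accounting_weight, i.e.
 * by 31/32 per rescale.
 */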

static void scale_stat(unsigned long *stat)
{
        *stat = ewma_add(*stat, 0, accounting_weight, 0);
}

static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
{
        if (++stats->rescale == rescale_at) {
                stats->rescale = 0;
                scale_stat(&stats->cache_hits);
                scale_stat(&stats->cache_misses);
                scale_stat(&stats->cache_bypass_hits);
                scale_stat(&stats->cache_bypass_misses);
                scale_stat(&stats->cache_readaheads);
                scale_stat(&stats->cache_miss_collisions);
                scale_stat(&stats->sectors_bypassed);
        }
}
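
/*
 * Timer callback.  Drains the shared atomic collector into all four
 * exported stat sets (shifted left by 16 to keep fixed-point precision),
 * applies the per-interval decay, then re-arms itself unless the
 * accounting is being torn down.  Note that the total set is passed a
 * rescale_at of 0, so it is effectively never decayed - it only
 * accumulates.
 */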

static void scale_accounting(unsigned long data)
{
        struct cache_accounting *acc = (struct cache_accounting *) data;

#define move_stat(name) do {                                            \
        unsigned t = atomic_xchg(&acc->collector.name, 0);             \
        t <<= 16;                                                       \
        acc->five_minute.name += t;                                     \
        acc->hour.name += t;                                            \
        acc->day.name += t;                                             \
        acc->total.name += t;                                           \
} while (0)

        move_stat(cache_hits);
        move_stat(cache_misses);
        move_stat(cache_bypass_hits);
        move_stat(cache_bypass_misses);
        move_stat(cache_readaheads);
        move_stat(cache_miss_collisions);
        move_stat(sectors_bypassed);

        scale_stats(&acc->total, 0);
        scale_stats(&acc->day, DAY_RESCALE);
        scale_stats(&acc->hour, HOUR_RESCALE);
        scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);

        acc->timer.expires += accounting_delay;

        if (!atomic_read(&acc->closing))
                add_timer(&acc->timer);
        else
                closure_return(&acc->cl);
}

static void mark_cache_stats(struct cache_stat_collector *stats,
                             bool hit, bool bypass)
{
        if (!bypass) {
                if (hit)
                        atomic_inc(&stats->cache_hits);
                else
                        atomic_inc(&stats->cache_misses);
        } else {
                if (hit)
                        atomic_inc(&stats->cache_bypass_hits);
                else
                        atomic_inc(&stats->cache_bypass_misses);
        }
}

void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
                               bool hit, bool bypass)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        mark_cache_stats(&dc->accounting.collector, hit, bypass);
        mark_cache_stats(&c->accounting.collector, hit, bypass);
}

void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        atomic_inc(&dc->accounting.collector.cache_readaheads);
        atomic_inc(&c->accounting.collector.cache_readaheads);
}

void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        atomic_inc(&dc->accounting.collector.cache_miss_collisions);
        atomic_inc(&c->accounting.collector.cache_miss_collisions);
}

void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
                               int sectors)
{
        atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
        atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}
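
/*
 * Sets up the four stat kobjects, ties the accounting lifetime to the
 * parent closure, and arms the periodic rescale timer.  This uses the
 * legacy init_timer()/timer.data/timer.function interface rather than
 * timer_setup().
 */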

void bch_cache_accounting_init(struct cache_accounting *acc,
                               struct closure *parent)
{
        kobject_init(&acc->total.kobj,       &bch_stats_ktype);
        kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
        kobject_init(&acc->hour.kobj,        &bch_stats_ktype);
        kobject_init(&acc->day.kobj,         &bch_stats_ktype);

        closure_init(&acc->cl, parent);
        init_timer(&acc->timer);
        acc->timer.expires  = jiffies + accounting_delay;
        acc->timer.data     = (unsigned long) acc;
        acc->timer.function = scale_accounting;
        add_timer(&acc->timer);
}
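
/*
 * A typical call sequence elsewhere in bcache (an illustrative sketch;
 * the real call sites live in super.c and request.c and may differ):
 *
 *      bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 *      bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj);
 *      ...
 *      bch_mark_cache_accounting(c, &dc->disk, hit, bypass);
 *      ...
 *      bch_cache_accounting_destroy(&dc->accounting);
 */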