block/blk-stat.c
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

#define BLK_RQ_STAT_BATCH	64

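/*
 * Per-queue stats state: the list of registered stat callbacks, a lock
 * protecting list updates, and a flag that keeps request timing enabled
 * even when no callbacks are registered.
 */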
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

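/* Reset a single stat bucket so it can start accumulating samples again. */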
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

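/*
 * Fold the temporary batch accumulator into the running mean. Samples are
 * batched so the mean is not recomputed with a 64-bit division on every
 * single sample.
 */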
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

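/* Merge the samples from @src into @dst, combining min, max, and mean. */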
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}

	dst->nr_samples += src->nr_samples;
}

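/*
 * Record one sample in a stat bucket. The batch is flushed early if adding
 * @value would overflow the accumulator or if BLK_RQ_STAT_BATCH samples
 * have already been collected.
 */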
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

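/*
 * Called on request completion: compute the request's latency and feed it
 * to every active callback's per-cpu bucket, as selected by bucket_fn().
 */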
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

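/*
 * Timer callback: aggregate the per-cpu buckets into cb->stat, reset the
 * per-cpu copies, and hand the result to the owner's timer_fn().
 */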
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

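/*
 * Allocate a callback with @buckets stat buckets plus a per-cpu copy of
 * each bucket. bucket_fn() maps a request to a bucket index (negative to
 * skip the request), and timer_fn() is invoked when the window timer fires.
 */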
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

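/*
 * Register a callback on a queue: zero its per-cpu buckets, publish it on
 * the RCU-protected callback list, and turn on request timing for the queue.
 */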
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

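/*
 * Unregister a callback. Request timing is turned off again unless other
 * callbacks remain or accounting was explicitly enabled for the queue.
 */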
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

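/*
 * Free a callback after an RCU grace period, so a concurrent blk_stat_add()
 * still walking the callback list cannot touch freed memory.
 */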
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

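/*
 * Keep request timing enabled for this queue even when no callbacks are
 * registered; the flag is never cleared for the lifetime of the queue.
 */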
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}