/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
6 #include <linux/kernel.h>
7 #include <linux/rculist.h>
8 #include <linux/blk-mq.h>
14 struct blk_queue_stats
{
15 struct list_head callbacks
;
17 bool enable_accounting
;
20 static void blk_stat_init(struct blk_rq_stat
*stat
)
23 stat
->max
= stat
->nr_samples
= stat
->mean
= 0;
27 /* src is a per-cpu stat, mean isn't initialized */
28 static void blk_stat_sum(struct blk_rq_stat
*dst
, struct blk_rq_stat
*src
)
33 dst
->min
= min(dst
->min
, src
->min
);
34 dst
->max
= max(dst
->max
, src
->max
);
36 dst
->mean
= div_u64(src
->batch
+ dst
->mean
* dst
->nr_samples
,
37 dst
->nr_samples
+ src
->nr_samples
);
39 dst
->nr_samples
+= src
->nr_samples
;
42 static void __blk_stat_add(struct blk_rq_stat
*stat
, u64 value
)
44 stat
->min
= min(stat
->min
, value
);
45 stat
->max
= max(stat
->max
, value
);
50 void blk_stat_add(struct request
*rq
)
52 struct request_queue
*q
= rq
->q
;
53 struct blk_stat_callback
*cb
;
54 struct blk_rq_stat
*stat
;
58 now
= __blk_stat_time(ktime_to_ns(ktime_get()));
59 if (now
< blk_stat_time(&rq
->issue_stat
))
62 value
= now
- blk_stat_time(&rq
->issue_stat
);
64 blk_throtl_stat_add(rq
, value
);
67 list_for_each_entry_rcu(cb
, &q
->stats
->callbacks
, list
) {
68 if (!blk_stat_is_active(cb
))
71 bucket
= cb
->bucket_fn(rq
);
75 stat
= &get_cpu_ptr(cb
->cpu_stat
)[bucket
];
76 __blk_stat_add(stat
, value
);
77 put_cpu_ptr(cb
->cpu_stat
);
82 static void blk_stat_timer_fn(struct timer_list
*t
)
84 struct blk_stat_callback
*cb
= from_timer(cb
, t
, timer
);
88 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
89 blk_stat_init(&cb
->stat
[bucket
]);
91 for_each_online_cpu(cpu
) {
92 struct blk_rq_stat
*cpu_stat
;
94 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
95 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++) {
96 blk_stat_sum(&cb
->stat
[bucket
], &cpu_stat
[bucket
]);
97 blk_stat_init(&cpu_stat
[bucket
]);
104 struct blk_stat_callback
*
105 blk_stat_alloc_callback(void (*timer_fn
)(struct blk_stat_callback
*),
106 int (*bucket_fn
)(const struct request
*),
107 unsigned int buckets
, void *data
)
109 struct blk_stat_callback
*cb
;
111 cb
= kmalloc(sizeof(*cb
), GFP_KERNEL
);
115 cb
->stat
= kmalloc_array(buckets
, sizeof(struct blk_rq_stat
),
121 cb
->cpu_stat
= __alloc_percpu(buckets
* sizeof(struct blk_rq_stat
),
122 __alignof__(struct blk_rq_stat
));
129 cb
->timer_fn
= timer_fn
;
130 cb
->bucket_fn
= bucket_fn
;
132 cb
->buckets
= buckets
;
133 timer_setup(&cb
->timer
, blk_stat_timer_fn
, 0);
137 EXPORT_SYMBOL_GPL(blk_stat_alloc_callback
);
139 void blk_stat_add_callback(struct request_queue
*q
,
140 struct blk_stat_callback
*cb
)
145 for_each_possible_cpu(cpu
) {
146 struct blk_rq_stat
*cpu_stat
;
148 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
149 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
150 blk_stat_init(&cpu_stat
[bucket
]);
153 spin_lock(&q
->stats
->lock
);
154 list_add_tail_rcu(&cb
->list
, &q
->stats
->callbacks
);
155 set_bit(QUEUE_FLAG_STATS
, &q
->queue_flags
);
156 spin_unlock(&q
->stats
->lock
);
158 EXPORT_SYMBOL_GPL(blk_stat_add_callback
);
160 void blk_stat_remove_callback(struct request_queue
*q
,
161 struct blk_stat_callback
*cb
)
163 spin_lock(&q
->stats
->lock
);
164 list_del_rcu(&cb
->list
);
165 if (list_empty(&q
->stats
->callbacks
) && !q
->stats
->enable_accounting
)
166 clear_bit(QUEUE_FLAG_STATS
, &q
->queue_flags
);
167 spin_unlock(&q
->stats
->lock
);
169 del_timer_sync(&cb
->timer
);
171 EXPORT_SYMBOL_GPL(blk_stat_remove_callback
);
173 static void blk_stat_free_callback_rcu(struct rcu_head
*head
)
175 struct blk_stat_callback
*cb
;
177 cb
= container_of(head
, struct blk_stat_callback
, rcu
);
178 free_percpu(cb
->cpu_stat
);
183 void blk_stat_free_callback(struct blk_stat_callback
*cb
)
186 call_rcu(&cb
->rcu
, blk_stat_free_callback_rcu
);
188 EXPORT_SYMBOL_GPL(blk_stat_free_callback
);
190 void blk_stat_enable_accounting(struct request_queue
*q
)
192 spin_lock(&q
->stats
->lock
);
193 q
->stats
->enable_accounting
= true;
194 set_bit(QUEUE_FLAG_STATS
, &q
->queue_flags
);
195 spin_unlock(&q
->stats
->lock
);
198 struct blk_queue_stats
*blk_alloc_queue_stats(void)
200 struct blk_queue_stats
*stats
;
202 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
206 INIT_LIST_HEAD(&stats
->callbacks
);
207 spin_lock_init(&stats
->lock
);
208 stats
->enable_accounting
= false;
213 void blk_free_queue_stats(struct blk_queue_stats
*stats
)
218 WARN_ON(!list_empty(&stats
->callbacks
));